Merge tag 'imx-fixes-6.2-2' of git://git.kernel.org/pub/scm/linux/kernel/git/shawnguo...
authorArnd Bergmann <arnd@arndb.de>
Mon, 30 Jan 2023 16:44:27 +0000 (17:44 +0100)
committerArnd Bergmann <arnd@arndb.de>
Mon, 30 Jan 2023 16:44:27 +0000 (17:44 +0100)
i.MX fixes for 6.2, round 2:

- Update MAINTAINERS i.MX entry to match arm64 freescale DTS.
- Drop misused 'uart-has-rtscts' from imx8m-venice boards.
- Fix USB host over-current polarity for imx7d-smegw01 board.
- Fix a typo in i.MX8DXL sc_pwrkey property name.
- Fix GPIO watchdog property for i.MX8MM eDM SBC board.
- Keep Ethernet PHY powered on imx8mm-verdin to avoid kernel crash.
- Fix configuration of i.MX8MM pad UART1_DTE_RX.

* tag 'imx-fixes-6.2-2' of git://git.kernel.org/pub/scm/linux/kernel/git/shawnguo/linux:
  ARM: dts: imx7d-smegw01: Fix USB host over-current polarity
  arm64: dts: imx8mm-verdin: Do not power down eth-phy
  MAINTAINERS: match freescale ARM64 DT directory in i.MX entry
  arm64: dts: imx8mm: Fix pad control for UART1_DTE_RX
  arm64: dts: freescale: imx8dxl: fix sc_pwrkey's property name linux,keycode
  arm64: dts: imx8m-venice: Remove incorrect 'uart-has-rtscts'
  arm64: dts: imx8mm: Reinstate GPIO watchdog always-running property on eDM SBC

Link: https://lore.kernel.org/r/20230130003614.GP20713@T480
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
1058 files changed:
.gitignore
.mailmap
Documentation/ABI/testing/debugfs-pktcdvd [new file with mode: 0644]
Documentation/ABI/testing/sysfs-class-pktcdvd [new file with mode: 0644]
Documentation/admin-guide/mm/zswap.rst
Documentation/arm64/silicon-errata.rst
Documentation/conf.py
Documentation/devicetree/bindings/cpufreq/cpufreq-qcom-hw.yaml
Documentation/devicetree/bindings/crypto/atmel,at91sam9g46-aes.yaml
Documentation/devicetree/bindings/crypto/atmel,at91sam9g46-sha.yaml
Documentation/devicetree/bindings/crypto/atmel,at91sam9g46-tdes.yaml
Documentation/devicetree/bindings/display/msm/dsi-controller-main.yaml
Documentation/devicetree/bindings/display/msm/dsi-phy-10nm.yaml
Documentation/devicetree/bindings/display/msm/dsi-phy-14nm.yaml
Documentation/devicetree/bindings/display/msm/dsi-phy-28nm.yaml
Documentation/devicetree/bindings/display/msm/qcom,qcm2290-mdss.yaml
Documentation/devicetree/bindings/display/msm/qcom,sm6115-mdss.yaml
Documentation/devicetree/bindings/interconnect/qcom,rpm.yaml
Documentation/devicetree/bindings/net/allwinner,sun8i-a83t-emac.yaml
Documentation/devicetree/bindings/net/marvell,orion-mdio.yaml
Documentation/devicetree/bindings/phy/amlogic,g12a-usb2-phy.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/phy/amlogic,g12a-usb3-pcie-phy.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/phy/amlogic,meson-g12a-usb2-phy.yaml [deleted file]
Documentation/devicetree/bindings/phy/amlogic,meson-g12a-usb3-pcie-phy.yaml [deleted file]
Documentation/devicetree/bindings/phy/qcom,usb-hs-28nm.yaml
Documentation/devicetree/bindings/soc/qcom/qcom,apr-services.yaml
Documentation/devicetree/bindings/sound/mt8186-mt6366-rt1019-rt5682s.yaml
Documentation/devicetree/bindings/sound/qcom,lpass-tx-macro.yaml
Documentation/devicetree/bindings/sound/qcom,lpass-wsa-macro.yaml
Documentation/devicetree/bindings/soundwire/qcom,soundwire.yaml
Documentation/devicetree/bindings/spi/atmel,at91rm9200-spi.yaml
Documentation/devicetree/bindings/spi/atmel,quadspi.yaml
Documentation/devicetree/bindings/spi/spi-peripheral-props.yaml
Documentation/filesystems/erofs.rst
Documentation/kbuild/makefiles.rst
Documentation/maintainer/maintainer-entry-profile.rst
Documentation/networking/rxrpc.rst
Documentation/nvme/feature-and-quirk-policy.rst [new file with mode: 0644]
Documentation/process/maintainer-netdev.rst
Documentation/sphinx/load_config.py
Documentation/virt/kvm/api.rst
Documentation/virt/kvm/locking.rst
MAINTAINERS
Makefile
arch/arm/boot/dts/armada-38x.dtsi
arch/arm/boot/dts/armada-39x.dtsi
arch/arm/boot/dts/aspeed-bmc-ibm-bonnell.dts
arch/arm/boot/dts/qcom-apq8084-ifc6540.dts
arch/arm/boot/dts/qcom-apq8084.dtsi
arch/arm/boot/dts/sam9x60.dtsi
arch/arm/boot/dts/stm32mp151a-prtt1l.dtsi
arch/arm/boot/dts/stm32mp157c-emstamp-argon.dtsi
arch/arm/boot/dts/stm32mp15xx-dhcom-som.dtsi
arch/arm/boot/dts/stm32mp15xx-dhcor-som.dtsi
arch/arm/include/asm/thread_info.h
arch/arm/mach-footbridge/isa-rtc.c
arch/arm/mach-omap1/Kconfig
arch/arm/mach-omap1/Makefile
arch/arm/mach-omap1/gpio15xx.c
arch/arm/mach-omap1/io.c
arch/arm/mach-omap1/mcbsp.c
arch/arm/mach-omap1/pm.h
arch/arm/mach-pxa/Kconfig
arch/arm64/Kconfig
arch/arm64/boot/dts/amlogic/meson-sm1-odroid-hc4.dts
arch/arm64/boot/dts/marvell/ac5-98dx25xx.dtsi
arch/arm64/boot/dts/qcom/msm8992-lg-bullhead.dtsi
arch/arm64/boot/dts/qcom/msm8992-xiaomi-libra.dts
arch/arm64/boot/dts/qcom/msm8992.dtsi
arch/arm64/boot/dts/qcom/msm8994-huawei-angler-rev-101.dts
arch/arm64/boot/dts/qcom/sc8280xp.dtsi
arch/arm64/boot/dts/qcom/sm8250.dtsi
arch/arm64/boot/dts/qcom/sm8350.dtsi
arch/arm64/crypto/sm4-ce-ccm-core.S
arch/arm64/crypto/sm4-ce-gcm-core.S
arch/arm64/include/asm/atomic_ll_sc.h
arch/arm64/include/asm/atomic_lse.h
arch/arm64/include/asm/cputype.h
arch/arm64/include/asm/esr.h
arch/arm64/include/asm/hugetlb.h
arch/arm64/include/asm/kvm_arm.h
arch/arm64/include/asm/kvm_emulate.h
arch/arm64/include/asm/pgtable.h
arch/arm64/include/asm/uprobes.h
arch/arm64/kernel/cpu_errata.c
arch/arm64/kernel/efi-rt-wrapper.S
arch/arm64/kernel/elfcore.c
arch/arm64/kernel/fpsimd.c
arch/arm64/kernel/ptrace.c
arch/arm64/kernel/signal.c
arch/arm64/kvm/hyp/include/hyp/fault.h
arch/arm64/kvm/hyp/include/hyp/switch.h
arch/arm64/kvm/mmu.c
arch/arm64/kvm/sys_regs.c
arch/arm64/kvm/vgic/vgic-v3.c
arch/arm64/mm/hugetlbpage.c
arch/arm64/mm/mmu.c
arch/arm64/tools/cpucaps
arch/ia64/kernel/elfcore.c
arch/loongarch/include/asm/ftrace.h
arch/loongarch/include/asm/inst.h
arch/loongarch/include/asm/unwind.h
arch/loongarch/kernel/Makefile
arch/loongarch/kernel/alternative.c
arch/loongarch/kernel/cpu-probe.c
arch/loongarch/kernel/genex.S
arch/loongarch/kernel/inst.c
arch/loongarch/kernel/process.c
arch/loongarch/kernel/traps.c
arch/loongarch/kernel/unwind.c [new file with mode: 0644]
arch/loongarch/kernel/unwind_guess.c
arch/loongarch/kernel/unwind_prologue.c
arch/loongarch/mm/tlb.c
arch/mips/ralink/of.c
arch/powerpc/boot/dts/fsl/t2081si-post.dtsi
arch/powerpc/boot/wrapper
arch/powerpc/include/asm/imc-pmu.h
arch/powerpc/kernel/vmlinux.lds.S
arch/powerpc/mm/book3s64/hash_utils.c
arch/powerpc/perf/imc-pmu.c
arch/riscv/boot/dts/sifive/fu740-c000.dtsi
arch/riscv/include/asm/uaccess.h
arch/riscv/kernel/probes/simulate-insn.h
arch/s390/boot/decompressor.c
arch/s390/configs/debug_defconfig
arch/s390/configs/defconfig
arch/s390/configs/zfcpdump_defconfig
arch/s390/include/asm/cpu_mf.h
arch/s390/include/asm/debug.h
arch/s390/include/asm/percpu.h
arch/s390/kernel/machine_kexec_file.c
arch/s390/kernel/perf_cpum_sf.c
arch/s390/kernel/setup.c
arch/s390/kernel/vmlinux.lds.S
arch/s390/kvm/interrupt.c
arch/sh/include/asm/pgtable-3level.h
arch/x86/boot/bioscall.S
arch/x86/coco/tdx/tdx.c
arch/x86/events/amd/core.c
arch/x86/events/intel/cstate.c
arch/x86/events/intel/uncore.c
arch/x86/events/msr.c
arch/x86/events/rapl.c
arch/x86/include/asm/insn-eval.h
arch/x86/include/asm/kvm_host.h
arch/x86/kernel/callthunks.c
arch/x86/kernel/cpu/bugs.c
arch/x86/kernel/cpu/resctrl/monitor.c
arch/x86/kernel/cpu/resctrl/rdtgroup.c
arch/x86/kernel/crash.c
arch/x86/kernel/kprobes/core.c
arch/x86/kernel/kprobes/opt.c
arch/x86/kernel/sev.c
arch/x86/kvm/cpuid.c
arch/x86/kvm/hyperv.c
arch/x86/kvm/irq_comm.c
arch/x86/kvm/lapic.h
arch/x86/kvm/mmu/spte.h
arch/x86/kvm/mmu/tdp_mmu.c
arch/x86/kvm/pmu.c
arch/x86/kvm/pmu.h
arch/x86/kvm/svm/nested.c
arch/x86/kvm/vmx/nested.c
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/x86.c
arch/x86/kvm/xen.c
arch/x86/lib/insn-eval.c
arch/x86/lib/iomap_copy_64.S
arch/x86/mm/init.c
arch/x86/mm/pat/memtype.c
arch/x86/pci/mmconfig-shared.c
arch/x86/um/elfcore.c
arch/x86/xen/p2m.c
arch/xtensa/include/asm/processor.h
arch/xtensa/kernel/traps.c
arch/xtensa/mm/fault.c
block/Kconfig
block/bfq-cgroup.c
block/bfq-iosched.c
block/bfq-iosched.h
block/bio.c
block/blk-cgroup.c
block/blk-core.c
block/blk-merge.c
block/blk-mq.c
block/genhd.c
drivers/accessibility/speakup/spk_ttyio.c
drivers/acpi/acpi_video.c
drivers/acpi/glue.c
drivers/acpi/prmt.c
drivers/acpi/resource.c
drivers/acpi/scan.c
drivers/acpi/video_detect.c
drivers/acpi/x86/s2idle.c
drivers/ata/Kconfig
drivers/ata/ahci.c
drivers/base/property.c
drivers/base/test/test_async_driver_probe.c
drivers/block/Kconfig
drivers/block/Makefile
drivers/block/drbd/drbd_req.c
drivers/block/pktcdvd.c [new file with mode: 0644]
drivers/block/ps3vram.c
drivers/block/rnbd/rnbd-clt.c
drivers/block/ublk_drv.c
drivers/block/virtio_blk.c
drivers/block/xen-blkback/xenbus.c
drivers/block/xen-blkfront.c
drivers/bluetooth/hci_qca.c
drivers/bus/sunxi-rsb.c
drivers/char/tpm/tpm-interface.c
drivers/char/tpm/xen-tpmfront.c
drivers/comedi/drivers/adv_pci1760.c
drivers/cpufreq/amd-pstate.c
drivers/cpufreq/apple-soc-cpufreq.c
drivers/cpufreq/armada-37xx-cpufreq.c
drivers/cpufreq/cppc_cpufreq.c
drivers/cpufreq/cpufreq-dt-platdev.c
drivers/cpufreq/qcom-cpufreq-hw.c
drivers/crypto/atmel-ecc.c
drivers/crypto/atmel-i2c.c
drivers/crypto/atmel-i2c.h
drivers/crypto/caam/blob_gen.c
drivers/crypto/virtio/virtio_crypto_skcipher_algs.c
drivers/dma-buf/dma-buf-sysfs-stats.c
drivers/dma-buf/dma-buf-sysfs-stats.h
drivers/dma-buf/dma-buf.c
drivers/dma/dmaengine.c
drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
drivers/dma/idxd/device.c
drivers/dma/imx-sdma.c
drivers/dma/lgm/lgm-dma.c
drivers/dma/ptdma/ptdma-dev.c
drivers/dma/ptdma/ptdma.h
drivers/dma/qcom/gpi.c
drivers/dma/tegra186-gpc-dma.c
drivers/dma/tegra210-adma.c
drivers/dma/ti/k3-udma.c
drivers/dma/xilinx/xilinx_dma.c
drivers/edac/edac_device.c
drivers/edac/edac_module.h
drivers/edac/highbank_mc_edac.c
drivers/firmware/arm_scmi/driver.c
drivers/firmware/arm_scmi/shmem.c
drivers/firmware/arm_scmi/virtio.c
drivers/firmware/efi/efi.c
drivers/firmware/efi/runtime-wrappers.c
drivers/firmware/google/coreboot_table.c
drivers/firmware/google/coreboot_table.h
drivers/firmware/google/gsmi.c
drivers/firmware/psci/psci.c
drivers/gpio/gpio-eic-sprd.c
drivers/gpio/gpio-mxc.c
drivers/gpio/gpio-pca953x.c
drivers/gpio/gpio-pmic-eic-sprd.c
drivers/gpio/gpio-sifive.c
drivers/gpio/gpio-sprd.c
drivers/gpio/gpiolib-acpi.c
drivers/gpio/gpiolib.c
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
drivers/gpu/drm/amd/amdkfd/kfd_topology.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
drivers/gpu/drm/drm_buddy.c
drivers/gpu/drm/drm_fb_helper.c
drivers/gpu/drm/drm_panel_orientation_quirks.c
drivers/gpu/drm/i915/display/intel_dsi_vbt.c
drivers/gpu/drm/i915/display/skl_universal_plane.c
drivers/gpu/drm/i915/gem/i915_gem_context.c
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
drivers/gpu/drm/i915/gem/i915_gem_mman.c
drivers/gpu/drm/i915/gem/selftests/huge_pages.c
drivers/gpu/drm/i915/gt/intel_gt.c
drivers/gpu/drm/i915/gt/intel_gt_regs.h
drivers/gpu/drm/i915/gt/intel_reset.c
drivers/gpu/drm/i915/gt/intel_workarounds.c
drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
drivers/gpu/drm/i915/gvt/debugfs.c
drivers/gpu/drm/i915/gvt/dmabuf.c
drivers/gpu/drm/i915/gvt/gtt.c
drivers/gpu/drm/i915/gvt/gvt.h
drivers/gpu/drm/i915/gvt/interrupt.c
drivers/gpu/drm/i915/gvt/kvmgt.c
drivers/gpu/drm/i915/gvt/scheduler.c
drivers/gpu/drm/i915/gvt/vgpu.c
drivers/gpu/drm/i915/i915_driver.c
drivers/gpu/drm/i915/i915_gem_evict.c
drivers/gpu/drm/i915/i915_gem_evict.h
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_pci.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/i915_switcheroo.c
drivers/gpu/drm/i915/i915_vma.c
drivers/gpu/drm/i915/selftests/i915_gem_evict.c
drivers/gpu/drm/imx/ipuv3-plane.c
drivers/gpu/drm/meson/meson_viu.c
drivers/gpu/drm/msm/adreno/a6xx_gmu.c
drivers/gpu/drm/msm/adreno/a6xx_gpu.c
drivers/gpu/drm/msm/adreno/a6xx_gpu.h
drivers/gpu/drm/msm/adreno/adreno_device.c
drivers/gpu/drm/msm/adreno/adreno_gpu.c
drivers/gpu/drm/msm/adreno/adreno_gpu.h
drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
drivers/gpu/drm/msm/dp/dp_aux.c
drivers/gpu/drm/msm/hdmi/hdmi.c
drivers/gpu/drm/msm/msm_drv.c
drivers/gpu/drm/msm/msm_gpu.c
drivers/gpu/drm/msm/msm_gpu.h
drivers/gpu/drm/msm/msm_mdss.c
drivers/gpu/drm/nouveau/nouveau_fbcon.c [deleted file]
drivers/gpu/drm/panfrost/Kconfig
drivers/gpu/drm/panfrost/panfrost_drv.c
drivers/gpu/drm/panfrost/panfrost_gem.c
drivers/gpu/drm/panfrost/panfrost_gem.h
drivers/gpu/drm/scheduler/sched_entity.c
drivers/gpu/drm/scheduler/sched_main.c
drivers/gpu/drm/tests/Makefile
drivers/gpu/drm/tests/drm_mm_test.c
drivers/gpu/drm/ttm/ttm_bo_util.c
drivers/gpu/drm/vc4/vc4_bo.c
drivers/gpu/drm/virtio/virtgpu_ioctl.c
drivers/gpu/drm/virtio/virtgpu_object.c
drivers/gpu/drm/vmwgfx/ttm_object.c
drivers/gpu/drm/vmwgfx/ttm_object.h
drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
drivers/gpu/drm/xen/xen_drm_front.c
drivers/hid/amd-sfh-hid/amd_sfh_client.c
drivers/hid/amd-sfh-hid/sfh1_1/amd_sfh_init.c
drivers/hid/hid-betopff.c
drivers/hid/hid-bigbenff.c
drivers/hid/hid-core.c
drivers/hid/hid-ids.h
drivers/hid/hid-playstation.c
drivers/hid/hid-quirks.c
drivers/hid/hid-uclogic-core.c
drivers/hid/hid-uclogic-params.c
drivers/hid/intel-ish-hid/ishtp/dma-if.c
drivers/infiniband/core/verbs.c
drivers/infiniband/hw/hfi1/user_exp_rcv.c
drivers/infiniband/hw/hfi1/user_exp_rcv.h
drivers/infiniband/hw/mlx5/counters.c
drivers/infiniband/hw/mlx5/qp.c
drivers/infiniband/sw/rxe/rxe_param.h
drivers/infiniband/sw/rxe/rxe_pool.c
drivers/infiniband/ulp/srp/ib_srp.h
drivers/input/misc/xen-kbdfront.c
drivers/interconnect/qcom/icc-rpm.c
drivers/interconnect/qcom/msm8996.c
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
drivers/iommu/arm/arm-smmu/arm-smmu.c
drivers/iommu/iommu.c
drivers/iommu/iova.c
drivers/iommu/mtk_iommu_v1.c
drivers/md/dm.c
drivers/md/md.c
drivers/memory/atmel-sdramc.c
drivers/memory/mvebu-devbus.c
drivers/memory/omap-gpmc.c
drivers/memory/tegra/tegra186.c
drivers/misc/fastrpc.c
drivers/misc/mei/bus.c
drivers/misc/mei/hw-me-regs.h
drivers/misc/mei/pci-me.c
drivers/misc/vmw_vmci/vmci_guest.c
drivers/mmc/host/sdhci-esdhc-imx.c
drivers/mmc/host/sunxi-mmc.c
drivers/mtd/parsers/scpart.c
drivers/mtd/parsers/tplink_safeloader.c
drivers/mtd/spi-nor/core.c
drivers/net/bonding/bond_3ad.c
drivers/net/bonding/bond_main.c
drivers/net/dsa/microchip/ksz9477.c
drivers/net/dsa/mv88e6xxx/Kconfig
drivers/net/dsa/qca/qca8k-8xxx.c
drivers/net/dsa/qca/qca8k.h
drivers/net/ethernet/amazon/ena/ena_com.c
drivers/net/ethernet/amazon/ena/ena_ethtool.c
drivers/net/ethernet/amazon/ena/ena_netdev.c
drivers/net/ethernet/amazon/ena/ena_netdev.h
drivers/net/ethernet/amd/xgbe/xgbe-dev.c
drivers/net/ethernet/amd/xgbe/xgbe-drv.c
drivers/net/ethernet/amd/xgbe/xgbe-i2c.c
drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
drivers/net/ethernet/amd/xgbe/xgbe.h
drivers/net/ethernet/broadcom/bcm63xx_enet.c
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/broadcom/bnxt/bnxt.h
drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
drivers/net/ethernet/cadence/macb_main.c
drivers/net/ethernet/freescale/enetc/enetc.c
drivers/net/ethernet/freescale/enetc/enetc_ierb.c
drivers/net/ethernet/freescale/fman/fman_dtsec.c
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
drivers/net/ethernet/intel/iavf/iavf_main.c
drivers/net/ethernet/intel/ice/ice_gnss.c
drivers/net/ethernet/intel/ice/ice_xsk.c
drivers/net/ethernet/intel/igc/igc_defines.h
drivers/net/ethernet/intel/igc/igc_ptp.c
drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
drivers/net/ethernet/marvell/octeontx2/af/cgx.c
drivers/net/ethernet/marvell/octeontx2/af/cgx.h
drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
drivers/net/ethernet/mellanox/mlx5/core/devlink.c
drivers/net/ethernet/mellanox/mlx5/core/en/htb.c
drivers/net/ethernet/mellanox/mlx5/core/en/params.c
drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/police.c
drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.c
drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c
drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c
drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c
drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c
drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
drivers/net/ethernet/mellanox/mlx5/core/health.c
drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
drivers/net/ethernet/mellanox/mlx5/core/main.c
drivers/net/ethernet/mellanox/mlx5/core/qos.c
drivers/net/ethernet/mellanox/mlx5/core/qos.h
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
drivers/net/ethernet/microchip/lan966x/lan966x_main.c
drivers/net/ethernet/microchip/lan966x/lan966x_port.c
drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
drivers/net/ethernet/microchip/lan966x/lan966x_vcap_impl.c
drivers/net/ethernet/microchip/sparx5/sparx5_main.c
drivers/net/ethernet/netronome/nfp/nfp_net.h
drivers/net/ethernet/netronome/nfp/nfp_net_common.c
drivers/net/ethernet/qlogic/qed/qed_debug.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h
drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
drivers/net/ethernet/realtek/r8169_main.c
drivers/net/ethernet/renesas/rswitch.c
drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
drivers/net/ethernet/stmicro/stmmac/dwmac5.c
drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
drivers/net/ipa/data/ipa_data-v4.7.c
drivers/net/ipa/ipa_interrupt.c
drivers/net/ipa/ipa_interrupt.h
drivers/net/ipa/ipa_power.c
drivers/net/phy/mdio_bus.c
drivers/net/phy/xilinx_gmii2rgmii.c
drivers/net/team/team.c
drivers/net/usb/cdc_ether.c
drivers/net/usb/r8152.c
drivers/net/usb/rndis_host.c
drivers/net/usb/sr9700.c
drivers/net/veth.c
drivers/net/virtio_net.c
drivers/net/vmxnet3/vmxnet3_drv.c
drivers/net/vrf.c
drivers/net/vxlan/vxlan_core.c
drivers/net/wan/fsl_ucc_hdlc.c
drivers/net/wireless/ath/ath9k/htc.h
drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c
drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
drivers/net/wireless/intel/iwlwifi/fw/acpi.c
drivers/net/wireless/mediatek/mt76/dma.c
drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
drivers/net/wireless/mediatek/mt76/mt7996/Kconfig
drivers/net/wireless/mediatek/mt76/tx.c
drivers/net/wireless/rndis_wlan.c
drivers/net/wireless/ti/Makefile
drivers/net/xen-netback/xenbus.c
drivers/net/xen-netfront.c
drivers/nfc/pn533/usb.c
drivers/nvme/host/apple.c
drivers/nvme/host/auth.c
drivers/nvme/host/core.c
drivers/nvme/host/ioctl.c
drivers/nvme/host/multipath.c
drivers/nvme/host/nvme.h
drivers/nvme/host/pci.c
drivers/nvme/target/admin-cmd.c
drivers/nvme/target/passthru.c
drivers/of/fdt.c
drivers/pci/controller/dwc/Kconfig
drivers/pci/xen-pcifront.c
drivers/phy/freescale/phy-fsl-imx8m-pcie.c
drivers/phy/phy-can-transceiver.c
drivers/phy/qualcomm/phy-qcom-usb-hs-28nm.c
drivers/phy/renesas/r8a779f0-ether-serdes.c
drivers/phy/rockchip/phy-rockchip-inno-usb2.c
drivers/phy/sunplus/phy-sunplus-usb2.c
drivers/phy/ti/Kconfig
drivers/pinctrl/nomadik/pinctrl-ab8500.c
drivers/pinctrl/nomadik/pinctrl-ab8505.c
drivers/pinctrl/nomadik/pinctrl-abx500.c
drivers/pinctrl/nomadik/pinctrl-abx500.h
drivers/pinctrl/nomadik/pinctrl-nomadik-db8500.c
drivers/pinctrl/nomadik/pinctrl-nomadik-stn8815.c
drivers/pinctrl/nomadik/pinctrl-nomadik.c
drivers/pinctrl/nomadik/pinctrl-nomadik.h
drivers/pinctrl/pinctrl-rockchip.c
drivers/pinctrl/sunplus/sppctl.c
drivers/platform/surface/aggregator/controller.c
drivers/platform/surface/aggregator/ssh_request_layer.c
drivers/platform/x86/amd/pmc.c
drivers/platform/x86/asus-nb-wmi.c
drivers/platform/x86/asus-wmi.c
drivers/platform/x86/asus-wmi.h
drivers/platform/x86/dell/dell-wmi-privacy.c
drivers/platform/x86/ideapad-laptop.c
drivers/platform/x86/intel/int3472/clk_and_regulator.c
drivers/platform/x86/intel/int3472/discrete.c
drivers/platform/x86/intel/pmc/core.c
drivers/platform/x86/simatic-ipc.c
drivers/platform/x86/sony-laptop.c
drivers/platform/x86/thinkpad_acpi.c
drivers/platform/x86/touchscreen_dmi.c
drivers/regulator/da9211-regulator.c
drivers/regulator/qcom-rpmh-regulator.c
drivers/reset/Kconfig
drivers/reset/reset-uniphier-glue.c
drivers/s390/block/dcssblk.c
drivers/s390/net/qeth_core_sys.c
drivers/scsi/hisi_sas/hisi_sas_main.c
drivers/scsi/libsas/sas_ata.c
drivers/scsi/mpi3mr/Makefile
drivers/scsi/mpi3mr/mpi3mr_fw.c
drivers/scsi/mpt3sas/mpt3sas_base.c
drivers/scsi/scsi_debug.c
drivers/scsi/scsi_error.c
drivers/scsi/scsi_transport_iscsi.c
drivers/scsi/storvsc_drv.c
drivers/scsi/xen-scsifront.c
drivers/soc/qcom/apr.c
drivers/soc/qcom/cpr.c
drivers/spi/spi-cadence-xspi.c
drivers/spi/spi-mt65xx.c
drivers/spi/spi.c
drivers/spi/spidev.c
drivers/staging/vc04_services/include/linux/raspberrypi/vchiq.h
drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.h
drivers/thermal/intel/int340x_thermal/processor_thermal_rfim.c
drivers/thermal/thermal_core.c
drivers/thunderbolt/retimer.c
drivers/thunderbolt/tb.c
drivers/thunderbolt/tunnel.c
drivers/thunderbolt/xdomain.c
drivers/tty/hvc/hvc_xen.c
drivers/tty/serial/8250/8250_exar.c
drivers/tty/serial/amba-pl011.c
drivers/tty/serial/atmel_serial.c
drivers/tty/serial/kgdboc.c
drivers/tty/serial/pch_uart.c
drivers/tty/serial/qcom_geni_serial.c
drivers/tty/serial/serial_core.c
drivers/ufs/core/ufshcd.c
drivers/usb/cdns3/cdns3-gadget.c
drivers/usb/chipidea/core.c
drivers/usb/common/ulpi.c
drivers/usb/core/hub.c
drivers/usb/core/usb-acpi.c
drivers/usb/dwc3/Kconfig
drivers/usb/dwc3/dwc3-xilinx.c
drivers/usb/dwc3/gadget.c
drivers/usb/fotg210/fotg210-core.c
drivers/usb/fotg210/fotg210-udc.c
drivers/usb/gadget/configfs.c
drivers/usb/gadget/function/f_fs.c
drivers/usb/gadget/function/f_ncm.c
drivers/usb/gadget/legacy/inode.c
drivers/usb/gadget/legacy/webcam.c
drivers/usb/host/ehci-fsl.c
drivers/usb/host/xen-hcd.c
drivers/usb/host/xhci-pci.c
drivers/usb/host/xhci-ring.c
drivers/usb/host/xhci.c
drivers/usb/host/xhci.h
drivers/usb/misc/iowarrior.c
drivers/usb/misc/onboard_usb_hub.c
drivers/usb/musb/omap2430.c
drivers/usb/serial/cp210x.c
drivers/usb/serial/option.c
drivers/usb/storage/uas-detect.h
drivers/usb/storage/unusual_uas.h
drivers/usb/typec/altmodes/displayport.c
drivers/usb/typec/tcpm/tcpm.c
drivers/usb/typec/ucsi/ucsi.c
drivers/usb/typec/ucsi/ucsi.h
drivers/vdpa/mlx5/core/mlx5_vdpa.h
drivers/vdpa/mlx5/core/mr.c
drivers/vdpa/mlx5/net/mlx5_vnet.c
drivers/vdpa/vdpa.c
drivers/vdpa/vdpa_sim/vdpa_sim.c
drivers/vdpa/vdpa_sim/vdpa_sim_blk.c
drivers/vdpa/vdpa_sim/vdpa_sim_net.c
drivers/vdpa/vdpa_user/vduse_dev.c
drivers/vdpa/virtio_pci/vp_vdpa.c
drivers/vhost/vdpa.c
drivers/vhost/vhost.c
drivers/vhost/vringh.c
drivers/vhost/vsock.c
drivers/video/fbdev/Kconfig
drivers/video/fbdev/aty/atyfb_base.c
drivers/video/fbdev/matrox/matroxfb_base.c
drivers/video/fbdev/omap/omapfb_main.c
drivers/video/fbdev/omap2/omapfb/dss/dsi.c
drivers/video/fbdev/xen-fbfront.c
drivers/virtio/virtio.c
drivers/virtio/virtio_pci_modern.c
drivers/virtio/virtio_ring.c
drivers/w1/w1.c
drivers/w1/w1_int.c
drivers/xen/pvcalls-back.c
drivers/xen/pvcalls-front.c
drivers/xen/xen-pciback/xenbus.c
drivers/xen/xen-scsiback.c
fs/affs/file.c
fs/afs/cmservice.c
fs/afs/rxrpc.c
fs/binfmt_elf.c
fs/binfmt_elf_fdpic.c
fs/btrfs/backref.c
fs/btrfs/bio.c
fs/btrfs/defrag.c
fs/btrfs/disk-io.c
fs/btrfs/disk-io.h
fs/btrfs/extent-io-tree.c
fs/btrfs/extent-tree.c
fs/btrfs/extent_io.c
fs/btrfs/file.c
fs/btrfs/fs.h
fs/btrfs/inode.c
fs/btrfs/qgroup.c
fs/btrfs/raid56.c
fs/btrfs/space-info.c
fs/btrfs/super.c
fs/btrfs/tree-log.c
fs/btrfs/volumes.c
fs/btrfs/zoned.c
fs/ceph/caps.c
fs/ceph/locks.c
fs/ceph/super.h
fs/cifs/cifsencrypt.c
fs/cifs/connect.c
fs/cifs/dfs.c
fs/cifs/dfs_cache.c
fs/cifs/dfs_cache.h
fs/cifs/link.c
fs/cifs/misc.c
fs/cifs/sess.c
fs/cifs/smb1ops.c
fs/cifs/smb2ops.c
fs/cifs/smb2pdu.c
fs/erofs/super.c
fs/erofs/zdata.c
fs/erofs/zmap.c
fs/ext4/xattr.c
fs/f2fs/data.c
fs/f2fs/extent_cache.c
fs/f2fs/file.c
fs/f2fs/segment.c
fs/hfs/inode.c
fs/ksmbd/auth.c
fs/ksmbd/connection.c
fs/ksmbd/smb2pdu.c
fs/ksmbd/transport_tcp.c
fs/nfs/dir.c
fs/nfs/filelayout/filelayout.c
fs/nfsd/filecache.c
fs/nfsd/filecache.h
fs/nfsd/netns.h
fs/nfsd/nfs4proc.c
fs/nfsd/nfs4state.c
fs/nfsd/nfs4xdr.c
fs/nfsd/nfsctl.c
fs/nfsd/nfsd.h
fs/nfsd/nfsproc.c
fs/nfsd/nfssvc.c
fs/nfsd/trace.h
fs/nilfs2/btree.c
fs/ntfs3/file.c
fs/udf/inode.c
fs/userfaultfd.c
fs/xfs/libxfs/xfs_btree.c
fs/xfs/xfs_extent_busy.c
fs/xfs/xfs_icache.c
fs/xfs/xfs_ioctl.c
fs/xfs/xfs_iomap.c
fs/xfs/xfs_qm.c
fs/xfs/xfs_reflink.c
fs/zonefs/super.c
include/acpi/acpi_bus.h
include/acpi/video.h
include/asm-generic/vmlinux.lds.h
include/drm/drm_plane_helper.h
include/linux/bio.h
include/linux/blkdev.h
include/linux/bpf.h
include/linux/dsa/tag_qca.h
include/linux/elfcore.h
include/linux/firmware/xlnx-zynqmp.h
include/linux/fs.h
include/linux/io_uring_types.h
include/linux/mlx5/device.h
include/linux/mlx5/driver.h
include/linux/mlx5/mlx5_ifc.h
include/linux/mm.h
include/linux/mm_inline.h
include/linux/mm_types.h
include/linux/mtd/spi-nor.h
include/linux/netfilter/ipset/ip_set.h
include/linux/nvme.h
include/linux/page_ref.h
include/linux/phy.h
include/linux/pktcdvd.h [new file with mode: 0644]
include/linux/platform_data/x86/simatic-ipc.h
include/linux/soc/ti/omap1-io.h
include/linux/sunrpc/rpc_pipe_fs.h
include/linux/tpm_eventlog.h
include/linux/usb.h
include/net/af_rxrpc.h
include/net/inet_hashtables.h
include/net/inet_timewait_sock.h
include/net/mac80211.h
include/net/netfilter/nf_tables.h
include/net/sch_generic.h
include/net/tc_wrapper.h
include/scsi/scsi_transport_iscsi.h
include/soc/bcm2835/raspberrypi-firmware.h
include/trace/events/btrfs.h
include/trace/events/rxrpc.h
include/uapi/linux/atmbr2684.h
include/uapi/linux/io_uring.h
include/uapi/linux/kvm.h
include/uapi/linux/pktcdvd.h [new file with mode: 0644]
include/uapi/linux/psci.h
include/uapi/linux/vdpa.h
include/xen/xenbus.h
init/Kconfig
init/Makefile
init/version-timestamp.c
io_uring/cancel.c
io_uring/fdinfo.c
io_uring/io-wq.c
io_uring/io_uring.c
io_uring/io_uring.h
io_uring/msg_ring.c
io_uring/poll.c
io_uring/rw.c
kernel/bpf/bpf_lsm.c
kernel/bpf/hashtab.c
kernel/bpf/offload.c
kernel/bpf/syscall.c
kernel/bpf/task_iter.c
kernel/bpf/trampoline.c
kernel/bpf/verifier.c
kernel/events/core.c
kernel/futex/syscalls.c
kernel/gen_kheaders.sh
kernel/kallsyms_selftest.c
kernel/kcsan/kcsan_test.c
kernel/locking/rtmutex.c
kernel/locking/rtmutex_api.c
kernel/printk/printk.c
kernel/sched/core.c
kernel/sys.c
kernel/time/tick-oneshot.c
kernel/time/time.c
kernel/time/timekeeping.c
kernel/trace/bpf_trace.c
lib/kunit/string-stream.c
lib/lockref.c
lib/scatterlist.c
lib/win_minmax.c
mm/hugetlb.c
mm/kasan/report.c
mm/khugepaged.c
mm/madvise.c
mm/memblock.c
mm/mmap.c
mm/nommu.c
mm/shmem.c
mm/slab.c
net/9p/trans_xen.c
net/bluetooth/hci_conn.c
net/bluetooth/hci_event.c
net/bluetooth/hci_sync.c
net/bluetooth/iso.c
net/bluetooth/mgmt_util.h
net/bluetooth/rfcomm/sock.c
net/caif/cfctrl.c
net/core/filter.c
net/core/gro.c
net/ethtool/ioctl.c
net/ethtool/rss.c
net/ipv4/af_inet.c
net/ipv4/inet_connection_sock.c
net/ipv4/inet_hashtables.c
net/ipv4/inet_timewait_sock.c
net/ipv4/tcp.c
net/ipv4/tcp_ulp.c
net/ipv6/raw.c
net/l2tp/l2tp_core.c
net/mac80211/agg-tx.c
net/mac80211/cfg.c
net/mac80211/debugfs_sta.c
net/mac80211/driver-ops.c
net/mac80211/driver-ops.h
net/mac80211/ht.c
net/mac80211/ieee80211_i.h
net/mac80211/iface.c
net/mac80211/rx.c
net/mac80211/tx.c
net/mac80211/util.c
net/mptcp/pm.c
net/mptcp/pm_userspace.c
net/mptcp/protocol.c
net/mptcp/protocol.h
net/mptcp/subflow.c
net/netfilter/ipset/ip_set_bitmap_ip.c
net/netfilter/ipset/ip_set_core.c
net/netfilter/ipset/ip_set_hash_ip.c
net/netfilter/ipset/ip_set_hash_ipmark.c
net/netfilter/ipset/ip_set_hash_ipport.c
net/netfilter/ipset/ip_set_hash_ipportip.c
net/netfilter/ipset/ip_set_hash_ipportnet.c
net/netfilter/ipset/ip_set_hash_net.c
net/netfilter/ipset/ip_set_hash_netiface.c
net/netfilter/ipset/ip_set_hash_netnet.c
net/netfilter/ipset/ip_set_hash_netport.c
net/netfilter/ipset/ip_set_hash_netportnet.c
net/netfilter/nf_conntrack_proto.c
net/netfilter/nf_conntrack_proto_tcp.c
net/netfilter/nf_tables_api.c
net/netfilter/nft_payload.c
net/nfc/llcp_core.c
net/nfc/netlink.c
net/openvswitch/datapath.c
net/rxrpc/Makefile
net/rxrpc/af_rxrpc.c
net/rxrpc/ar-internal.h
net/rxrpc/call_accept.c
net/rxrpc/call_event.c
net/rxrpc/call_object.c
net/rxrpc/call_state.c [new file with mode: 0644]
net/rxrpc/conn_client.c
net/rxrpc/conn_event.c
net/rxrpc/conn_object.c
net/rxrpc/conn_service.c
net/rxrpc/input.c
net/rxrpc/insecure.c
net/rxrpc/io_thread.c
net/rxrpc/local_object.c
net/rxrpc/net_ns.c
net/rxrpc/output.c
net/rxrpc/peer_object.c
net/rxrpc/proc.c
net/rxrpc/recvmsg.c
net/rxrpc/rxkad.c
net/rxrpc/rxperf.c
net/rxrpc/security.c
net/rxrpc/sendmsg.c
net/sched/act_mpls.c
net/sched/cls_tcindex.c
net/sched/sch_api.c
net/sched/sch_atm.c
net/sched/sch_cbq.c
net/sched/sch_gred.c
net/sched/sch_htb.c
net/sched/sch_taprio.c
net/sunrpc/auth_gss/auth_gss.c
net/sunrpc/auth_gss/svcauth_gss.c
net/sunrpc/svc.c
net/sunrpc/svc_xprt.c
net/sunrpc/svcsock.c
net/sunrpc/xprtrdma/svc_rdma_transport.c
net/tipc/node.c
scripts/Makefile.modpost
scripts/Makefile.package
scripts/Makefile.vmlinux
scripts/basic/fixdep.c
scripts/jobserver-exec
scripts/kconfig/.gitignore
scripts/kconfig/Makefile
scripts/kconfig/mconf.c
scripts/package/mkspec
security/tomoyo/Kconfig
security/tomoyo/Makefile
sound/core/control.c
sound/core/control_led.c
sound/pci/hda/cs35l41_hda.c
sound/pci/hda/patch_hdmi.c
sound/pci/hda/patch_realtek.c
sound/soc/amd/yc/acp6x-mach.c
sound/soc/codecs/rt9120.c
sound/soc/codecs/wm8904.c
sound/soc/fsl/fsl-asoc-card.c
sound/soc/fsl/fsl_micfil.c
sound/soc/fsl/fsl_ssi.c
sound/soc/intel/boards/Kconfig
sound/soc/intel/boards/sof_nau8825.c
sound/soc/intel/common/soc-acpi-intel-adl-match.c
sound/soc/intel/common/soc-acpi-intel-rpl-match.c
sound/soc/mediatek/Kconfig
sound/soc/mediatek/mt8186/mt8186-mt6366-rt1019-rt5682s.c
sound/soc/qcom/Kconfig
sound/soc/qcom/Makefile
sound/soc/qcom/common.c
sound/soc/qcom/common.h
sound/soc/qcom/lpass-cpu.c
sound/soc/qcom/sc8280xp.c
sound/soc/qcom/sdw.c [new file with mode: 0644]
sound/soc/qcom/sdw.h [new file with mode: 0644]
sound/soc/qcom/sm8250.c
sound/soc/sof/debug.c
sound/soc/sof/pm.c
sound/usb/implicit.c
sound/usb/line6/driver.c
sound/usb/line6/midi.c
sound/usb/line6/midibuf.c
sound/usb/line6/midibuf.h
sound/usb/line6/pod.c
sound/usb/pcm.c
sound/usb/stream.c
sound/xen/xen_snd_front.c
tools/arch/arm64/include/asm/cputype.h
tools/arch/arm64/include/uapi/asm/kvm.h
tools/arch/x86/include/uapi/asm/kvm.h
tools/bpf/bpftool/Makefile
tools/include/linux/build_bug.h
tools/include/nolibc/arch-mips.h
tools/include/nolibc/arch-riscv.h
tools/include/nolibc/ctype.h
tools/include/nolibc/errno.h
tools/include/nolibc/signal.h
tools/include/nolibc/stdio.h
tools/include/nolibc/stdlib.h
tools/include/nolibc/string.h
tools/include/nolibc/sys.h
tools/include/nolibc/time.h
tools/include/nolibc/types.h
tools/include/nolibc/unistd.h
tools/include/uapi/linux/kvm.h
tools/objtool/check.c
tools/perf/Documentation/Makefile
tools/perf/Makefile.config
tools/perf/Makefile.perf
tools/perf/arch/riscv/util/unwind-libdw.c
tools/perf/builtin-kmem.c
tools/perf/builtin-lock.c
tools/perf/builtin-trace.c
tools/perf/command-list.txt
tools/perf/tests/bpf-script-test-prologue.c
tools/perf/tests/make
tools/perf/tests/shell/buildid.sh
tools/perf/tests/shell/record+probe_libc_inet_pton.sh
tools/perf/trace/beauty/include/linux/socket.h
tools/perf/util/PERF-VERSION-GEN
tools/perf/util/auxtrace.c
tools/perf/util/bpf_counter.h
tools/perf/util/bpf_counter_cgroup.c
tools/perf/util/build-id.c
tools/perf/util/cgroup.c
tools/perf/util/data.c
tools/perf/util/expr.l
tools/perf/util/generate-cmdlist.sh
tools/perf/util/sort.c
tools/perf/util/trace-event.h
tools/testing/memblock/.gitignore
tools/testing/memblock/Makefile
tools/testing/memblock/internal.h
tools/testing/selftests/bpf/.gitignore
tools/testing/selftests/bpf/DENYLIST.s390x
tools/testing/selftests/bpf/prog_tests/bpf_iter.c
tools/testing/selftests/bpf/prog_tests/btf_dump.c
tools/testing/selftests/bpf/prog_tests/decap_sanity.c [new file with mode: 0644]
tools/testing/selftests/bpf/prog_tests/jeq_infer_not_null.c [new file with mode: 0644]
tools/testing/selftests/bpf/progs/bpf_tracing_net.h
tools/testing/selftests/bpf/progs/decap_sanity.c [new file with mode: 0644]
tools/testing/selftests/bpf/progs/jeq_infer_not_null_fail.c [new file with mode: 0644]
tools/testing/selftests/kvm/.gitignore
tools/testing/selftests/kvm/Makefile
tools/testing/selftests/kvm/aarch64/page_fault_test.c
tools/testing/selftests/kvm/lib/aarch64/ucall.c
tools/testing/selftests/kvm/lib/kvm_util.c
tools/testing/selftests/kvm/lib/ucall_common.c
tools/testing/selftests/kvm/lib/x86_64/processor.c
tools/testing/selftests/kvm/memslot_perf_test.c
tools/testing/selftests/kvm/x86_64/hyperv_ipi.c
tools/testing/selftests/kvm/x86_64/svm_nested_soft_inject_test.c
tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c
tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c
tools/testing/selftests/lib.mk
tools/testing/selftests/net/.gitignore
tools/testing/selftests/net/af_unix/test_unix_oob.c
tools/testing/selftests/net/arp_ndisc_evict_nocarrier.sh
tools/testing/selftests/net/bind_timewait.c [new file with mode: 0644]
tools/testing/selftests/net/cmsg_sender.c
tools/testing/selftests/net/l2_tos_ttl_inherit.sh
tools/testing/selftests/net/mptcp/userspace_pm.sh
tools/testing/selftests/net/toeplitz.c
tools/testing/selftests/netfilter/nft_trans_stress.sh
tools/testing/selftests/netfilter/settings [new file with mode: 0644]
tools/testing/selftests/proc/proc-empty-vm.c
tools/testing/selftests/proc/proc-pid-vm.c
tools/virtio/ringtest/main.h
tools/virtio/virtio-trace/trace-agent-ctl.c
tools/virtio/virtio_test.c
tools/virtio/vringh_test.c
virt/kvm/kvm_main.c
virt/kvm/kvm_mm.h

index 3ec73ead6757e28cdc61de24e0d556809155a64e..20dce5c3b9e0f2c124db43c2620c8c1dc0404569 100644 (file)
@@ -39,6 +39,7 @@
 *.o.*
 *.patch
 *.rmeta
+*.rpm
 *.rsi
 *.s
 *.so
index ccba4cf0d893849363c2d9a44d304d3f23f5de6a..8deff4cec169c509c7219a48a1c499a8e4febb70 100644 (file)
--- a/.mailmap
+++ b/.mailmap
@@ -371,6 +371,7 @@ Rémi Denis-Courmont <rdenis@simphalempin.com>
 Ricardo Ribalda <ribalda@kernel.org> <ricardo@ribalda.com>
 Ricardo Ribalda <ribalda@kernel.org> Ricardo Ribalda Delgado <ribalda@kernel.org>
 Ricardo Ribalda <ribalda@kernel.org> <ricardo.ribalda@gmail.com>
+Robert Foss <rfoss@kernel.org> <robert.foss@linaro.org>
 Roman Gushchin <roman.gushchin@linux.dev> <guro@fb.com>
 Roman Gushchin <roman.gushchin@linux.dev> <guroan@gmail.com>
 Roman Gushchin <roman.gushchin@linux.dev> <klamm@yandex-team.ru>
@@ -422,6 +423,7 @@ Tony Luck <tony.luck@intel.com>
 TripleX Chung <xxx.phy@gmail.com> <triplex@zh-kernel.org>
 TripleX Chung <xxx.phy@gmail.com> <zhongyu@18mail.cn>
 Tsuneo Yoshioka <Tsuneo.Yoshioka@f-secure.com>
+Tudor Ambarus <tudor.ambarus@linaro.org> <tudor.ambarus@microchip.com>
 Tycho Andersen <tycho@tycho.pizza> <tycho@tycho.ws>
 Tzung-Bi Shih <tzungbi@kernel.org> <tzungbi@google.com>
 Uwe Kleine-König <ukleinek@informatik.uni-freiburg.de>
diff --git a/Documentation/ABI/testing/debugfs-pktcdvd b/Documentation/ABI/testing/debugfs-pktcdvd
new file mode 100644 (file)
index 0000000..f6f65a4
--- /dev/null
@@ -0,0 +1,18 @@
+What:           /sys/kernel/debug/pktcdvd/pktcdvd[0-7]
+Date:           Oct. 2006
+KernelVersion:  2.6.20
+Contact:        Thomas Maier <balagi@justmail.de>
+Description:
+
+The pktcdvd module (packet writing driver) creates
+these files in debugfs:
+
+/sys/kernel/debug/pktcdvd/pktcdvd[0-7]/
+
+    ====            ====== ====================================
+    info            0444   Lots of driver statistics and infos.
+    ====            ====== ====================================
+
+Example::
+
+    cat /sys/kernel/debug/pktcdvd/pktcdvd0/info
diff --git a/Documentation/ABI/testing/sysfs-class-pktcdvd b/Documentation/ABI/testing/sysfs-class-pktcdvd
new file mode 100644 (file)
index 0000000..ba1ce62
--- /dev/null
@@ -0,0 +1,97 @@
+sysfs interface
+---------------
+The pktcdvd module (packet writing driver) creates the following files in the
+sysfs: (<devid> is in the format major:minor)
+
+What:          /sys/class/pktcdvd/add
+What:          /sys/class/pktcdvd/remove
+What:          /sys/class/pktcdvd/device_map
+Date:          Oct. 2006
+KernelVersion: 2.6.20
+Contact:       Thomas Maier <balagi@justmail.de>
+Description:
+
+               ==========      ==============================================
+               add             (WO) Write a block device id (major:minor) to
+                               create a new pktcdvd device and map it to the
+                               block device.
+
+               remove          (WO) Write the pktcdvd device id (major:minor)
+                               to remove the pktcdvd device.
+
+               device_map      (RO) Shows the device mapping in format:
+                               pktcdvd[0-7] <pktdevid> <blkdevid>
+               ==========      ==============================================
+
+
+What:          /sys/class/pktcdvd/pktcdvd[0-7]/dev
+What:          /sys/class/pktcdvd/pktcdvd[0-7]/uevent
+Date:          Oct. 2006
+KernelVersion: 2.6.20
+Contact:       Thomas Maier <balagi@justmail.de>
+Description:
+               dev:    (RO) Device id
+
+               uevent: (WO) To send a uevent
+
+
+What:          /sys/class/pktcdvd/pktcdvd[0-7]/stat/packets_started
+What:          /sys/class/pktcdvd/pktcdvd[0-7]/stat/packets_finished
+What:          /sys/class/pktcdvd/pktcdvd[0-7]/stat/kb_written
+What:          /sys/class/pktcdvd/pktcdvd[0-7]/stat/kb_read
+What:          /sys/class/pktcdvd/pktcdvd[0-7]/stat/kb_read_gather
+What:          /sys/class/pktcdvd/pktcdvd[0-7]/stat/reset
+Date:          Oct. 2006
+KernelVersion: 2.6.20
+Contact:       Thomas Maier <balagi@justmail.de>
+Description:
+               packets_started:        (RO) Number of started packets.
+
+               packets_finished:       (RO) Number of finished packets.
+
+               kb_written:             (RO) kBytes written.
+
+               kb_read:                (RO) kBytes read.
+
+               kb_read_gather:         (RO) kBytes read to fill write packets.
+
+               reset:                  (WO) Write any value to it to reset
+                                       pktcdvd device statistic values, like
+                                       bytes read/written.
+
+
+What:          /sys/class/pktcdvd/pktcdvd[0-7]/write_queue/size
+What:          /sys/class/pktcdvd/pktcdvd[0-7]/write_queue/congestion_off
+What:          /sys/class/pktcdvd/pktcdvd[0-7]/write_queue/congestion_on
+Date:          Oct. 2006
+KernelVersion: 2.6.20
+Contact:       Thomas Maier <balagi@justmail.de>
+Description:
+               ==============  ================================================
+               size            (RO) Contains the size of the bio write queue.
+
+               congestion_off  (RW) If bio write queue size is below this mark,
+                               accept new bio requests from the block layer.
+
+               congestion_on   (RW) If bio write queue size is higher as this
+                               mark, do no longer accept bio write requests
+                               from the block layer and wait till the pktcdvd
+                               device has processed enough bio's so that bio
+                               write queue size is below congestion off mark.
+                               A value of <= 0 disables congestion control.
+               ==============  ================================================
+
+
+Example:
+--------
+To use the pktcdvd sysfs interface directly, you can do::
+
+    # create a new pktcdvd device mapped to /dev/hdc
+    echo "22:0" >/sys/class/pktcdvd/add
+    cat /sys/class/pktcdvd/device_map
+    # assuming device pktcdvd0 was created, look at stat's
+    cat /sys/class/pktcdvd/pktcdvd0/stat/kb_written
+    # print the device id of the mapped block device
+    fgrep pktcdvd0 /sys/class/pktcdvd/device_map
+    # remove device, using pktcdvd0 device id   253:0
+    echo "253:0" >/sys/class/pktcdvd/remove
index f67de481c7f60559c169ac0ba7e8dcaff24b5da9..6dd74a18268baa6c74acfd55499ad769e04c12b2 100644 (file)
@@ -70,9 +70,7 @@ e.g. ``zswap.zpool=zbud``. It can also be changed at runtime using the sysfs
 The zbud type zpool allocates exactly 1 page to store 2 compressed pages, which
 means the compression ratio will always be 2:1 or worse (because of half-full
 zbud pages).  The zsmalloc type zpool has a more complex compressed page
-storage method, and it can achieve greater storage densities.  However,
-zsmalloc does not implement compressed page eviction, so once zswap fills it
-cannot evict the oldest page, it can only reject new pages.
+storage method, and it can achieve greater storage densities.
 
 When a swap page is passed from frontswap to zswap, zswap maintains a mapping
 of the swap entry, a combination of the swap type and swap offset, to the zpool
index 808ade4cc008ac7c41b0a13e5685fe5afae1dfe3..ec5f889d76819fd2fae733997d61ff1c33114ace 100644 (file)
@@ -120,6 +120,8 @@ stable kernels.
 +----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Cortex-A710     | #2224489        | ARM64_ERRATUM_2224489       |
 +----------------+-----------------+-----------------+-----------------------------+
+| ARM            | Cortex-A715     | #2645198        | ARM64_ERRATUM_2645198       |
++----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Cortex-X2       | #2119858        | ARM64_ERRATUM_2119858       |
 +----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Cortex-X2       | #2224489        | ARM64_ERRATUM_2224489       |
index a5c45df0bd83978028691abca71ab9d6c5155d8c..d927737e3c10fe207456f9eb360958c8b8a500a4 100644 (file)
@@ -31,6 +31,12 @@ def have_command(cmd):
 # Get Sphinx version
 major, minor, patch = sphinx.version_info[:3]
 
+#
+# Warn about older versions that we don't want to support for much
+# longer.
+#
+if (major < 2) or (major == 2 and minor < 4):
+    print('WARNING: support for Sphinx < 2.4 will be removed soon.')
 
 # If extensions (or modules to document with autodoc) are in another directory,
 # add these directories to sys.path here. If the directory is relative to the
@@ -339,7 +345,11 @@ html_use_smartypants = False
 
 # Custom sidebar templates, maps document names to template names.
 # Note that the RTD theme ignores this
-html_sidebars = { '**': ["about.html", 'searchbox.html', 'localtoc.html', 'sourcelink.html']}
+html_sidebars = { '**': ['searchbox.html', 'localtoc.html', 'sourcelink.html']}
+
+# about.html is available for alabaster theme. Add it at the front.
+if html_theme == 'alabaster':
+    html_sidebars['**'].insert(0, 'about.html')
 
 # Output file base name for HTML help builder.
 htmlhelp_basename = 'TheLinuxKerneldoc'
index 903b31129f012b1d755ab612e1ec804ca3407f87..99e159bc5fb13b3f141015621b91249bce20dc1a 100644 (file)
@@ -54,6 +54,17 @@ properties:
       - const: xo
       - const: alternate
 
+  interrupts:
+    minItems: 1
+    maxItems: 3
+
+  interrupt-names:
+    minItems: 1
+    items:
+      - const: dcvsh-irq-0
+      - const: dcvsh-irq-1
+      - const: dcvsh-irq-2
+
   '#freq-domain-cells':
     const: 1
 
index 0ccaab16dc614ea6c72cd67279b945ed8a117b41..0b7383b3106b51aa937bb25f569d78a5bea474d7 100644 (file)
@@ -8,7 +8,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Atmel Advanced Encryption Standard (AES) HW cryptographic accelerator
 
 maintainers:
-  - Tudor Ambarus <tudor.ambarus@microchip.com>
+  - Tudor Ambarus <tudor.ambarus@linaro.org>
 
 properties:
   compatible:
index 5163c51b4547b512f9aace6777d7d4be5ce7fec7..ee2ffb0343251f5a2ce7c4d94286ae898db43baa 100644 (file)
@@ -8,7 +8,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Atmel Secure Hash Algorithm (SHA) HW cryptographic accelerator
 
 maintainers:
-  - Tudor Ambarus <tudor.ambarus@microchip.com>
+  - Tudor Ambarus <tudor.ambarus@linaro.org>
 
 properties:
   compatible:
index fcc5adf03cadba45800bc1082f5e51a20adddadb..3d6ed24b1b006480d9e4d86793078b9a5422eb72 100644 (file)
@@ -8,7 +8,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Atmel Triple Data Encryption Standard (TDES) HW cryptographic accelerator
 
 maintainers:
-  - Tudor Ambarus <tudor.ambarus@microchip.com>
+  - Tudor Ambarus <tudor.ambarus@linaro.org>
 
 properties:
   compatible:
index f2c143730a551fe8b6f68dd35fc18a3c6a633b22..6e2fd6e9fa7f0aec445c2d04dfecc892d1806dfc 100644 (file)
@@ -32,7 +32,7 @@ properties:
       - description: Display byte clock
       - description: Display byte interface clock
       - description: Display pixel clock
-      - description: Display escape clock
+      - description: Display core clock
       - description: Display AHB clock
       - description: Display AXI clock
 
@@ -137,8 +137,6 @@ required:
   - phys
   - assigned-clocks
   - assigned-clock-parents
-  - power-domains
-  - operating-points-v2
   - ports
 
 additionalProperties: false
index d9ad8b659f58ee0f5bf3b64b696beb64914c9609..3ec466c3ab38bfe31588b5154a675dd5af5126b3 100644 (file)
@@ -69,7 +69,6 @@ required:
   - compatible
   - reg
   - reg-names
-  - vdds-supply
 
 unevaluatedProperties: false
 
index 819de5ce0bc9170b8949f21724172da863ced56a..a43e11d3b00d2cfe74b24187114415336d876b2e 100644 (file)
@@ -39,7 +39,6 @@ required:
   - compatible
   - reg
   - reg-names
-  - vcca-supply
 
 unevaluatedProperties: false
 
index 3d8540a06fe22a4865f9d45b2bcc3b8e009a646f..2f1fd140c87df95ec60fe37614b724e97ba3395c 100644 (file)
@@ -34,6 +34,10 @@ properties:
   vddio-supply:
     description: Phandle to vdd-io regulator device node.
 
+  qcom,dsi-phy-regulator-ldo-mode:
+    type: boolean
+    description: Indicates if the LDO mode PHY regulator is wanted.
+
 required:
   - compatible
   - reg
index d6f043a4b08d2dbf86108d15168510dacd78e4f3..4795e13c7b597baaffa5a30db4df769bc51a3cfd 100644 (file)
@@ -72,7 +72,7 @@ examples:
     #include <dt-bindings/interconnect/qcom,qcm2290.h>
     #include <dt-bindings/power/qcom-rpmpd.h>
 
-    mdss@5e00000 {
+    display-subsystem@5e00000 {
         #address-cells = <1>;
         #size-cells = <1>;
         compatible = "qcom,qcm2290-mdss";
index a86d7f53fa84dfcf2c8fe12d12c38a7c5d07757e..886858ef67000537e05e031650dfd897eb9311cc 100644 (file)
@@ -62,7 +62,7 @@ examples:
     #include <dt-bindings/interrupt-controller/arm-gic.h>
     #include <dt-bindings/power/qcom-rpmpd.h>
 
-    mdss@5e00000 {
+    display-subsystem@5e00000 {
         #address-cells = <1>;
         #size-cells = <1>;
         compatible = "qcom,sm6115-mdss";
index 4b37aa88a375b12ebffd09f43e6539a61cfb9c65..5e6be4e79201eeb09903ea8a1af78ffab305f44f 100644 (file)
@@ -84,7 +84,6 @@ allOf:
               - qcom,msm8939-pcnoc
               - qcom,msm8939-snoc
               - qcom,msm8996-a1noc
-              - qcom,msm8996-a2noc
               - qcom,msm8996-bimc
               - qcom,msm8996-cnoc
               - qcom,msm8996-pnoc
@@ -186,6 +185,29 @@ allOf:
       required:
         - power-domains
 
+  - if:
+      properties:
+        compatible:
+          contains:
+            enum:
+              - qcom,msm8996-a2noc
+
+    then:
+      properties:
+        clock-names:
+          items:
+            - const: bus
+            - const: bus_a
+            - const: aggre2_ufs_axi
+            - const: ufs_axi
+
+        clocks:
+          items:
+            - description: Bus Clock
+            - description: Bus A Clock
+            - description: Aggregate2 NoC UFS AXI Clock
+            - description: UFS AXI Clock
+
   - if:
       properties:
         compatible:
index 1432fda3b603f74e11d287f8a29d7bf03a042e79..47bc2057e6292a829dc25b2ef5e2f70ea6a3efa8 100644 (file)
@@ -40,6 +40,9 @@ properties:
   clock-names:
     const: stmmaceth
 
+  phy-supply:
+    description: PHY regulator
+
   syscon:
     $ref: /schemas/types.yaml#/definitions/phandle
     description:
index d2906b4a0f5957bb13b20554a6e404ecd4ed489b..e35da8b01dc257f9d5a96fcad1807ae5ea3df06e 100644 (file)
@@ -16,9 +16,6 @@ description: |
   8k has a second unit which provides an interface with the xMDIO bus. This
   driver handles these interfaces.
 
-allOf:
-  - $ref: "mdio.yaml#"
-
 properties:
   compatible:
     enum:
@@ -39,13 +36,38 @@ required:
   - compatible
   - reg
 
+allOf:
+  - $ref: mdio.yaml#
+
+  - if:
+      required:
+        - interrupts
+
+    then:
+      properties:
+        reg:
+          items:
+            - items:
+                - $ref: /schemas/types.yaml#/definitions/cell
+                - const: 0x84
+
+    else:
+      properties:
+        reg:
+          items:
+            - items:
+                - $ref: /schemas/types.yaml#/definitions/cell
+                - enum:
+                    - 0x4
+                    - 0x10
+
 unevaluatedProperties: false
 
 examples:
   - |
     mdio@d0072004 {
       compatible = "marvell,orion-mdio";
-      reg = <0xd0072004 0x4>;
+      reg = <0xd0072004 0x84>;
       #address-cells = <1>;
       #size-cells = <0>;
       interrupts = <30>;
diff --git a/Documentation/devicetree/bindings/phy/amlogic,g12a-usb2-phy.yaml b/Documentation/devicetree/bindings/phy/amlogic,g12a-usb2-phy.yaml
new file mode 100644 (file)
index 0000000..bb01c6b
--- /dev/null
@@ -0,0 +1,78 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+# Copyright 2019 BayLibre, SAS
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/phy/amlogic,g12a-usb2-phy.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Amlogic G12A USB2 PHY
+
+maintainers:
+  - Neil Armstrong <neil.armstrong@linaro.org>
+
+properties:
+  compatible:
+    enum:
+      - amlogic,g12a-usb2-phy
+      - amlogic,a1-usb2-phy
+
+  reg:
+    maxItems: 1
+
+  clocks:
+    maxItems: 1
+
+  clock-names:
+    items:
+      - const: xtal
+
+  resets:
+    maxItems: 1
+
+  reset-names:
+    items:
+      - const: phy
+
+  "#phy-cells":
+    const: 0
+
+  phy-supply:
+    description:
+      Phandle to a regulator that provides power to the PHY. This
+      regulator will be managed during the PHY power on/off sequence.
+
+required:
+  - compatible
+  - reg
+  - clocks
+  - clock-names
+  - resets
+  - reset-names
+  - "#phy-cells"
+
+if:
+  properties:
+    compatible:
+      enum:
+        - amlogic,meson-a1-usb-ctrl
+
+then:
+  properties:
+    power-domains:
+      maxItems: 1
+  required:
+    - power-domains
+
+additionalProperties: false
+
+examples:
+  - |
+    phy@36000 {
+          compatible = "amlogic,g12a-usb2-phy";
+          reg = <0x36000 0x2000>;
+          clocks = <&xtal>;
+          clock-names = "xtal";
+          resets = <&phy_reset>;
+          reset-names = "phy";
+          #phy-cells = <0>;
+    };
diff --git a/Documentation/devicetree/bindings/phy/amlogic,g12a-usb3-pcie-phy.yaml b/Documentation/devicetree/bindings/phy/amlogic,g12a-usb3-pcie-phy.yaml
new file mode 100644 (file)
index 0000000..129d26e
--- /dev/null
@@ -0,0 +1,59 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+# Copyright 2019 BayLibre, SAS
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/phy/amlogic,g12a-usb3-pcie-phy.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Amlogic G12A USB3 + PCIE Combo PHY
+
+maintainers:
+  - Neil Armstrong <neil.armstrong@linaro.org>
+
+properties:
+  compatible:
+    enum:
+      - amlogic,g12a-usb3-pcie-phy
+
+  reg:
+    maxItems: 1
+
+  clocks:
+    maxItems: 1
+
+  clock-names:
+    items:
+      - const: ref_clk
+
+  resets:
+    maxItems: 1
+
+  reset-names:
+    items:
+      - const: phy
+
+  "#phy-cells":
+    const: 1
+
+required:
+  - compatible
+  - reg
+  - clocks
+  - clock-names
+  - resets
+  - reset-names
+  - "#phy-cells"
+
+additionalProperties: false
+
+examples:
+  - |
+    phy@46000 {
+          compatible = "amlogic,g12a-usb3-pcie-phy";
+          reg = <0x46000 0x2000>;
+          clocks = <&ref_clk>;
+          clock-names = "ref_clk";
+          resets = <&phy_reset>;
+          reset-names = "phy";
+          #phy-cells = <1>;
+    };
diff --git a/Documentation/devicetree/bindings/phy/amlogic,meson-g12a-usb2-phy.yaml b/Documentation/devicetree/bindings/phy/amlogic,meson-g12a-usb2-phy.yaml
deleted file mode 100644 (file)
index f3a5fba..0000000
+++ /dev/null
@@ -1,78 +0,0 @@
-# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
-# Copyright 2019 BayLibre, SAS
-%YAML 1.2
----
-$id: "http://devicetree.org/schemas/phy/amlogic,meson-g12a-usb2-phy.yaml#"
-$schema: "http://devicetree.org/meta-schemas/core.yaml#"
-
-title: Amlogic G12A USB2 PHY
-
-maintainers:
-  - Neil Armstrong <neil.armstrong@linaro.org>
-
-properties:
-  compatible:
-    enum:
-      - amlogic,meson-g12a-usb2-phy
-      - amlogic,meson-a1-usb2-phy
-
-  reg:
-    maxItems: 1
-
-  clocks:
-    maxItems: 1
-
-  clock-names:
-    items:
-      - const: xtal
-
-  resets:
-    maxItems: 1
-
-  reset-names:
-    items:
-      - const: phy
-
-  "#phy-cells":
-    const: 0
-
-  phy-supply:
-    description:
-      Phandle to a regulator that provides power to the PHY. This
-      regulator will be managed during the PHY power on/off sequence.
-
-required:
-  - compatible
-  - reg
-  - clocks
-  - clock-names
-  - resets
-  - reset-names
-  - "#phy-cells"
-
-if:
-  properties:
-    compatible:
-      enum:
-        - amlogic,meson-a1-usb-ctrl
-
-then:
-  properties:
-    power-domains:
-      maxItems: 1
-  required:
-    - power-domains
-
-additionalProperties: false
-
-examples:
-  - |
-    phy@36000 {
-          compatible = "amlogic,meson-g12a-usb2-phy";
-          reg = <0x36000 0x2000>;
-          clocks = <&xtal>;
-          clock-names = "xtal";
-          resets = <&phy_reset>;
-          reset-names = "phy";
-          #phy-cells = <0>;
-    };
diff --git a/Documentation/devicetree/bindings/phy/amlogic,meson-g12a-usb3-pcie-phy.yaml b/Documentation/devicetree/bindings/phy/amlogic,meson-g12a-usb3-pcie-phy.yaml
deleted file mode 100644 (file)
index 868b4e6..0000000
+++ /dev/null
@@ -1,59 +0,0 @@
-# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
-# Copyright 2019 BayLibre, SAS
-%YAML 1.2
----
-$id: "http://devicetree.org/schemas/phy/amlogic,meson-g12a-usb3-pcie-phy.yaml#"
-$schema: "http://devicetree.org/meta-schemas/core.yaml#"
-
-title: Amlogic G12A USB3 + PCIE Combo PHY
-
-maintainers:
-  - Neil Armstrong <neil.armstrong@linaro.org>
-
-properties:
-  compatible:
-    enum:
-      - amlogic,meson-g12a-usb3-pcie-phy
-
-  reg:
-    maxItems: 1
-
-  clocks:
-    maxItems: 1
-
-  clock-names:
-    items:
-      - const: ref_clk
-
-  resets:
-    maxItems: 1
-
-  reset-names:
-    items:
-      - const: phy
-
-  "#phy-cells":
-    const: 1
-
-required:
-  - compatible
-  - reg
-  - clocks
-  - clock-names
-  - resets
-  - reset-names
-  - "#phy-cells"
-
-additionalProperties: false
-
-examples:
-  - |
-    phy@46000 {
-          compatible = "amlogic,meson-g12a-usb3-pcie-phy";
-          reg = <0x46000 0x2000>;
-          clocks = <&ref_clk>;
-          clock-names = "ref_clk";
-          resets = <&phy_reset>;
-          reset-names = "phy";
-          #phy-cells = <1>;
-    };
index abcc4373f39e1898244abeca9f7308f4de357d51..ca6a0836b53c4168e29830b1a5591cfc9453ed70 100644 (file)
@@ -16,7 +16,6 @@ properties:
   compatible:
     enum:
       - qcom,usb-hs-28nm-femtophy
-      - qcom,usb-hs-28nm-mdm9607
 
   reg:
     maxItems: 1
index 290555426c39ca90ebeb087cce514f51c4cc92ff..bdf482db32aac1fc84f42aa9b72cf6d64c798933 100644 (file)
@@ -39,8 +39,8 @@ properties:
   qcom,protection-domain:
     $ref: /schemas/types.yaml#/definitions/string-array
     description: |
-      Protection domain service name and path for APR service
-      possible values are::
+      Protection domain service name and path for APR service (if supported).
+      Possible values are::
       "avs/audio", "msm/adsp/audio_pd".
       "kernel/elf_loader", "msm/modem/wlan_pd".
       "tms/servreg", "msm/adsp/audio_pd".
@@ -49,6 +49,5 @@ properties:
 
 required:
   - reg
-  - qcom,protection-domain
 
 additionalProperties: true
index 9d3139990237408ce2ff823cfb9b869cd84d4b56..aa23b0024c461d596f81071a70833daa565c7358 100644 (file)
@@ -16,6 +16,7 @@ properties:
   compatible:
     enum:
       - mediatek,mt8186-mt6366-rt1019-rt5682s-sound
+      - mediatek,mt8186-mt6366-rt5682s-max98360-sound
 
   mediatek,platform:
     $ref: "/schemas/types.yaml#/definitions/phandle"
index 66431aade3b70a1794bd317af60a9a2cadd8cefd..da5f70910da5204ee0c37bba62f3f7473852983d 100644 (file)
@@ -30,7 +30,9 @@ properties:
     const: 0
 
   clocks:
-    maxItems: 5
+    oneOf:
+      - maxItems: 3
+      - maxItems: 5
 
   clock-names:
     oneOf:
index 2bf8d082f8f1044d64bb9493eea892f14b629378..66cbb1f5e31a5cdaf53126972b1f32c4122fed67 100644 (file)
@@ -9,9 +9,6 @@ title: LPASS(Low Power Audio Subsystem) VA Macro audio codec
 maintainers:
   - Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
 
-allOf:
-  - $ref: dai-common.yaml#
-
 properties:
   compatible:
     enum:
@@ -30,15 +27,12 @@ properties:
     const: 0
 
   clocks:
-    maxItems: 5
+    minItems: 5
+    maxItems: 6
 
   clock-names:
-    items:
-      - const: mclk
-      - const: npl
-      - const: macro
-      - const: dcodec
-      - const: fsgen
+    minItems: 5
+    maxItems: 6
 
   clock-output-names:
     maxItems: 1
@@ -55,10 +49,51 @@ required:
   - reg
   - "#sound-dai-cells"
 
+allOf:
+  - $ref: dai-common.yaml#
+
+  - if:
+      properties:
+        compatible:
+          enum:
+            - qcom,sc7280-lpass-wsa-macro
+            - qcom,sm8450-lpass-wsa-macro
+            - qcom,sc8280xp-lpass-wsa-macro
+    then:
+      properties:
+        clocks:
+          maxItems: 5
+        clock-names:
+          items:
+            - const: mclk
+            - const: npl
+            - const: macro
+            - const: dcodec
+            - const: fsgen
+
+  - if:
+      properties:
+        compatible:
+          enum:
+            - qcom,sm8250-lpass-wsa-macro
+    then:
+      properties:
+        clocks:
+          minItems: 6
+        clock-names:
+          items:
+            - const: mclk
+            - const: npl
+            - const: macro
+            - const: dcodec
+            - const: va
+            - const: fsgen
+
 unevaluatedProperties: false
 
 examples:
   - |
+    #include <dt-bindings/clock/qcom,sm8250-lpass-aoncc.h>
     #include <dt-bindings/sound/qcom,q6afe.h>
     codec@3240000 {
       compatible = "qcom,sm8250-lpass-wsa-macro";
@@ -69,7 +104,8 @@ examples:
                <&audiocc 0>,
                <&q6afecc LPASS_HW_MACRO_VOTE LPASS_CLK_ATTRIBUTE_COUPLE_NO>,
                <&q6afecc LPASS_HW_DCODEC_VOTE LPASS_CLK_ATTRIBUTE_COUPLE_NO>,
+               <&aoncc LPASS_CDC_VA_MCLK>,
                <&vamacro>;
-      clock-names = "mclk", "npl", "macro", "dcodec", "fsgen";
+      clock-names = "mclk", "npl", "macro", "dcodec", "va", "fsgen";
       clock-output-names = "mclk";
     };
index bcbfa71536cda602d40e1e6a4a0d3b9bfb468a31..3efdc192ab019182571338fef555dd3a9653c6f7 100644 (file)
@@ -80,7 +80,7 @@ properties:
       or applicable for the respective data port.
       More info in MIPI Alliance SoundWire 1.0 Specifications.
     minItems: 3
-    maxItems: 5
+    maxItems: 8
 
   qcom,ports-sinterval-low:
     $ref: /schemas/types.yaml#/definitions/uint8-array
@@ -124,7 +124,7 @@ properties:
       or applicable for the respective data port.
       More info in MIPI Alliance SoundWire 1.0 Specifications.
     minItems: 3
-    maxItems: 5
+    maxItems: 8
 
   qcom,ports-block-pack-mode:
     $ref: /schemas/types.yaml#/definitions/uint8-array
@@ -154,7 +154,7 @@ properties:
       or applicable for the respective data port.
       More info in MIPI Alliance SoundWire 1.0 Specifications.
     minItems: 3
-    maxItems: 5
+    maxItems: 8
     items:
       oneOf:
         - minimum: 0
@@ -171,7 +171,7 @@ properties:
       or applicable for the respective data port.
       More info in MIPI Alliance SoundWire 1.0 Specifications.
     minItems: 3
-    maxItems: 5
+    maxItems: 8
     items:
       oneOf:
         - minimum: 0
@@ -187,7 +187,7 @@ properties:
       or applicable for the respective data port.
       More info in MIPI Alliance SoundWire 1.0 Specifications.
     minItems: 3
-    maxItems: 5
+    maxItems: 8
     items:
       oneOf:
         - minimum: 0
index 4dd973e341e6c772a2f6f1d945ce31eb6aab325e..6c57dd6c3a3614d3ed883bff2d1ed2fb20edf483 100644 (file)
@@ -8,7 +8,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Atmel SPI device
 
 maintainers:
-  - Tudor Ambarus <tudor.ambarus@microchip.com>
+  - Tudor Ambarus <tudor.ambarus@linaro.org>
 
 allOf:
   - $ref: spi-controller.yaml#
index 1d493add4053dd11add73e67e0e35ca81a12f16b..b0d99bc105352c5bcae56708896d508b2680850d 100644 (file)
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Atmel Quad Serial Peripheral Interface (QSPI)
 
 maintainers:
-  - Tudor Ambarus <tudor.ambarus@microchip.com>
+  - Tudor Ambarus <tudor.ambarus@linaro.org>
 
 allOf:
   - $ref: spi-controller.yaml#
index ead2cccf658fdee57208afaa695fabe68668d1a4..9a60c0664bbe7df45fb113c3a314105c2ad5e4c0 100644 (file)
@@ -44,9 +44,9 @@ properties:
     description:
       Maximum SPI clocking speed of the device in Hz.
 
-  spi-cs-setup-ns:
+  spi-cs-setup-delay-ns:
     description:
-      Delay in nanosecods to be introduced by the controller after CS is
+      Delay in nanoseconds to be introduced by the controller after CS is
       asserted.
 
   spi-rx-bus-width:
index 067fd1670b1f1d6937d214f33309c4fc55e29923..a43aacf1494e778585aea093fb4c099dc4459be7 100644 (file)
@@ -120,6 +120,8 @@ dax={always,never}     Use direct access (no page cache).  See
 dax                    A legacy option which is an alias for ``dax=always``.
 device=%s              Specify a path to an extra device to be used together.
 fsid=%s                Specify a filesystem image ID for Fscache back-end.
+domain_id=%s           Specify a domain ID in fscache mode so that different images
+                       with the same blobs under a given domain ID can share storage.
 ===================    =========================================================
 
 Sysfs Entries
index 6b7368d1f51639c016e4be5f4c3c4f51b0f29991..38bc74eaa547403935a2ae645cb0ec875a1a0279 100644 (file)
@@ -1042,7 +1042,7 @@ $(clean-files).
 
 When executing "make clean", the file "crc32table.h" will be deleted.
 Kbuild will assume files to be in the same relative directory as the
-Makefile, except if prefixed with $(objtree).
+Makefile.
 
 To exclude certain files or directories from make clean, use the
 $(no-clean-files) variable.
index 93b2ae6c34a99b86915f1a53b54235135a131973..cfd37f31077f6cd96e0c8ebea68ba3f98235d652 100644 (file)
@@ -104,3 +104,4 @@ to do something different in the near future.
    ../riscv/patch-acceptance
    ../driver-api/media/maintainer-entry-profile
    ../driver-api/vfio-pci-device-specific-driver-acceptance
+   ../nvme/feature-and-quirk-policy
index 39494a6ea739cf02217f088721ff23e1751521fe..e1af54424192b375f701992a95ffeeb68a76de8c 100644 (file)
@@ -880,8 +880,8 @@ The kernel interface functions are as follows:
 
      notify_end_rx can be NULL or it can be used to specify a function to be
      called when the call changes state to end the Tx phase.  This function is
-     called with the call-state spinlock held to prevent any reply or final ACK
-     from being delivered first.
+     called with a spinlock held to prevent the last DATA packet from being
+     transmitted until the function returns.
 
  (#) Receive data from a call::
 
diff --git a/Documentation/nvme/feature-and-quirk-policy.rst b/Documentation/nvme/feature-and-quirk-policy.rst
new file mode 100644 (file)
index 0000000..c01d836
--- /dev/null
@@ -0,0 +1,77 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+===================================
+Linux NVMe feature and quirk policy
+===================================
+
+This file explains the policy used to decide what is supported by the
+Linux NVMe driver and what is not.
+
+
+Introduction
+============
+
+NVM Express is an open collection of standards and information.
+
+The Linux NVMe host driver in drivers/nvme/host/ supports devices
+implementing the NVM Express (NVMe) family of specifications, which
+currently consists of a number of documents:
+
+ - the NVMe Base specification
+ - various Command Set specifications (e.g. NVM Command Set)
+ - various Transport specifications (e.g. PCIe, Fibre Channel, RDMA, TCP)
+ - the NVMe Management Interface specification
+
+See https://nvmexpress.org/developers/ for the NVMe specifications.
+
+
+Supported features
+==================
+
+NVMe is a large suite of specifications, and contains features that are only
+useful or suitable for specific use-cases. It is important to note that Linux
+does not aim to implement every feature in the specification.  Every additional
+feature implemented introduces more code, more maintenance and potentially more
+bugs.  Hence there is an inherent tradeoff between functionality and
+maintainability of the NVMe host driver.
+
+Any feature implemented in the Linux NVMe host driver must support the
+following requirements:
+
+  1. The feature is specified in a release version of an official NVMe
+     specification, or in a ratified Technical Proposal (TP) that is
+     available on the NVMe website. Or if it is not directly related to the
+     on-wire protocol, does not contradict any of the NVMe specifications.
+  2. Does not conflict with the Linux architecture, nor the design of the
+     NVMe host driver.
+  3. Has a clear, indisputable value-proposition and a wide consensus across
+     the community.
+
+Vendor specific extensions are generally not supported in the NVMe host
+driver.
+
+It is strongly recommended to work with the Linux NVMe and block layer
+maintainers and get feedback on specification changes that are intended
+to be used by the Linux NVMe host driver in order to avoid conflict at a
+later stage.
+
+
+Quirks
+======
+
+Sometimes implementations of open standards fail to correctly implement parts
+of the standards.  Linux uses identifier-based quirks to work around such
+implementation bugs.  The intent of quirks is to deal with widely available
+hardware, usually consumer, which Linux users can't use without these quirks.
+Typically these implementations are not or only superficially tested with Linux
+by the hardware manufacturer.
+
+The Linux NVMe maintainers decide ad hoc whether to quirk implementations
+based on the impact of the problem to Linux users and how it impacts
+maintainability of the driver.  In general quirks are a last resort, if no
+firmware updates or other workarounds are available from the vendor.
+
+Quirks will not be added to the Linux kernel for hardware that isn't available
+on the mass market.  Hardware that fails qualification for enterprise Linux
+distributions, ChromeOS, Android or other consumers of the Linux kernel
+should be fixed before it is shipped instead of relying on Linux quirks.
index 1fa5ab8754d358a80f707e544ab03d4b9734e3b1..4a75686d35ab4a646babcadb8d187bde8600038a 100644 (file)
@@ -2,9 +2,9 @@
 
 .. _netdev-FAQ:
 
-==========
-netdev FAQ
-==========
+=============================
+Networking subsystem (netdev)
+=============================
 
 tl;dr
 -----
@@ -15,14 +15,15 @@ tl;dr
  - don't repost your patches within one 24h period
  - reverse xmas tree
 
-What is netdev?
----------------
-It is a mailing list for all network-related Linux stuff.  This
+netdev
+------
+
+netdev is a mailing list for all network-related Linux stuff.  This
 includes anything found under net/ (i.e. core code like IPv6) and
 drivers/net (i.e. hardware specific drivers) in the Linux source tree.
 
 Note that some subsystems (e.g. wireless drivers) which have a high
-volume of traffic have their own specific mailing lists.
+volume of traffic have their own specific mailing lists and trees.
 
 The netdev list is managed (like many other Linux mailing lists) through
 VGER (http://vger.kernel.org/) with archives available at
@@ -32,32 +33,10 @@ Aside from subsystems like those mentioned above, all network-related
 Linux development (i.e. RFC, review, comments, etc.) takes place on
 netdev.
 
-How do the changes posted to netdev make their way into Linux?
---------------------------------------------------------------
-There are always two trees (git repositories) in play.  Both are
-driven by David Miller, the main network maintainer.  There is the
-``net`` tree, and the ``net-next`` tree.  As you can probably guess from
-the names, the ``net`` tree is for fixes to existing code already in the
-mainline tree from Linus, and ``net-next`` is where the new code goes
-for the future release.  You can find the trees here:
-
-- https://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git
-- https://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next.git
-
-How do I indicate which tree (net vs. net-next) my patch should be in?
-----------------------------------------------------------------------
-To help maintainers and CI bots you should explicitly mark which tree
-your patch is targeting. Assuming that you use git, use the prefix
-flag::
-
-  git format-patch --subject-prefix='PATCH net-next' start..finish
+Development cycle
+-----------------
 
-Use ``net`` instead of ``net-next`` (always lower case) in the above for
-bug-fix ``net`` content.
-
-How often do changes from these trees make it to the mainline Linus tree?
--------------------------------------------------------------------------
-To understand this, you need to know a bit of background information on
+Here is a bit of background information on
 the cadence of Linux development.  Each new release starts off with a
 two week "merge window" where the main maintainers feed their new stuff
 to Linus for merging into the mainline tree.  After the two weeks, the
@@ -69,9 +48,33 @@ rc2 is released.  This repeats on a roughly weekly basis until rc7
 state of churn), and a week after the last vX.Y-rcN was done, the
 official vX.Y is released.
 
-Relating that to netdev: At the beginning of the 2-week merge window,
-the ``net-next`` tree will be closed - no new changes/features.  The
-accumulated new content of the past ~10 weeks will be passed onto
+To find out where we are now in the cycle - load the mainline (Linus)
+page here:
+
+  https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
+
+and note the top of the "tags" section.  If it is rc1, it is early in
+the dev cycle.  If it was tagged rc7 a week ago, then a release is
+probably imminent. If the most recent tag is a final release tag
+(without an ``-rcN`` suffix) - we are most likely in a merge window
+and ``net-next`` is closed.
+
+git trees and patch flow
+------------------------
+
+There are two networking trees (git repositories) in play.  Both are
+driven by David Miller, the main network maintainer.  There is the
+``net`` tree, and the ``net-next`` tree.  As you can probably guess from
+the names, the ``net`` tree is for fixes to existing code already in the
+mainline tree from Linus, and ``net-next`` is where the new code goes
+for the future release.  You can find the trees here:
+
+- https://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git
+- https://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next.git
+
+Relating that to kernel development: At the beginning of the 2-week
+merge window, the ``net-next`` tree will be closed - no new changes/features.
+The accumulated new content of the past ~10 weeks will be passed onto
 mainline/Linus via a pull request for vX.Y -- at the same time, the
 ``net`` tree will start accumulating fixes for this pulled content
 relating to vX.Y
@@ -103,22 +106,14 @@ focus for ``net`` is on stabilization and bug fixes.
 
 Finally, the vX.Y gets released, and the whole cycle starts over.
 
-So where are we now in this cycle?
-----------------------------------
+netdev patch review
+-------------------
 
-Load the mainline (Linus) page here:
+Patch status
+~~~~~~~~~~~~
 
-  https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
-
-and note the top of the "tags" section.  If it is rc1, it is early in
-the dev cycle.  If it was tagged rc7 a week ago, then a release is
-probably imminent. If the most recent tag is a final release tag
-(without an ``-rcN`` suffix) - we are most likely in a merge window
-and ``net-next`` is closed.
-
-How can I tell the status of a patch I've sent?
------------------------------------------------
-Start by looking at the main patchworks queue for netdev:
+Status of a patch can be checked by looking at the main patchwork
+queue for netdev:
 
   https://patchwork.kernel.org/project/netdevbpf/list/
 
@@ -127,73 +122,141 @@ patch. Patches are indexed by the ``Message-ID`` header of the emails
 which carried them so if you have trouble finding your patch append
 the value of ``Message-ID`` to the URL above.
 
-How long before my patch is accepted?
--------------------------------------
-Generally speaking, the patches get triaged quickly (in less than
-48h). But be patient, if your patch is active in patchwork (i.e. it's
-listed on the project's patch list) the chances it was missed are close to zero.
-Asking the maintainer for status updates on your
-patch is a good way to ensure your patch is ignored or pushed to the
-bottom of the priority list.
+Updating patch status
+~~~~~~~~~~~~~~~~~~~~~
 
-Should I directly update patchwork state of my own patches?
------------------------------------------------------------
 It may be tempting to help the maintainers and update the state of your
-own patches when you post a new version or spot a bug. Please do not do that.
+own patches when you post a new version or spot a bug. Please **do not**
+do that.
 Interfering with the patch status on patchwork will only cause confusion. Leave
 it to the maintainer to figure out what is the most recent and current
 version that should be applied. If there is any doubt, the maintainer
 will reply and ask what should be done.
 
-How do I divide my work into patches?
--------------------------------------
+Review timelines
+~~~~~~~~~~~~~~~~
 
-Put yourself in the shoes of the reviewer. Each patch is read separately
-and therefore should constitute a comprehensible step towards your stated
-goal.
+Generally speaking, the patches get triaged quickly (in less than
+48h). But be patient, if your patch is active in patchwork (i.e. it's
+listed on the project's patch list) the chances it was missed are close to zero.
+Asking the maintainer for status updates on your
+patch is a good way to ensure your patch is ignored or pushed to the
+bottom of the priority list.
 
-Avoid sending series longer than 15 patches. Larger series takes longer
-to review as reviewers will defer looking at it until they find a large
-chunk of time. A small series can be reviewed in a short time, so Maintainers
-just do it. As a result, a sequence of smaller series gets merged quicker and
-with better review coverage. Re-posting large series also increases the mailing
-list traffic.
+Partial resends
+~~~~~~~~~~~~~~~
 
-I made changes to only a few patches in a patch series should I resend only those changed?
-------------------------------------------------------------------------------------------
-No, please resend the entire patch series and make sure you do number your
+Please always resend the entire patch series and make sure you do number your
 patches such that it is clear this is the latest and greatest set of patches
-that can be applied.
-
-I have received review feedback, when should I post a revised version of the patches?
--------------------------------------------------------------------------------------
-Allow at least 24 hours to pass between postings. This will ensure reviewers
-from all geographical locations have a chance to chime in. Do not wait
-too long (weeks) between postings either as it will make it harder for reviewers
-to recall all the context.
+that can be applied. Do not try to resend just the patches which changed.
 
-Make sure you address all the feedback in your new posting. Do not post a new
-version of the code if the discussion about the previous version is still
-ongoing, unless directly instructed by a reviewer.
+Handling misapplied patches
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-I submitted multiple versions of a patch series and it looks like a version other than the last one has been accepted, what should I do?
-----------------------------------------------------------------------------------------------------------------------------------------
+Occasionally a patch series gets applied before receiving critical feedback,
+or the wrong version of a series gets applied.
 There is no revert possible, once it is pushed out, it stays like that.
 Please send incremental versions on top of what has been merged in order to fix
 the patches the way they would look like if your latest patch series was to be
 merged.
 
-Are there special rules regarding stable submissions on netdev?
----------------------------------------------------------------
+Stable tree
+~~~~~~~~~~~
+
 While it used to be the case that netdev submissions were not supposed
 to carry explicit ``CC: stable@vger.kernel.org`` tags that is no longer
 the case today. Please follow the standard stable rules in
 :ref:`Documentation/process/stable-kernel-rules.rst <stable_kernel_rules>`,
 and make sure you include appropriate Fixes tags!
 
-Is the comment style convention different for the networking content?
----------------------------------------------------------------------
-Yes, in a largely trivial way.  Instead of this::
+Security fixes
+~~~~~~~~~~~~~~
+
+Do not email netdev maintainers directly if you think you discovered
+a bug that might have possible security implications.
+The current netdev maintainer has consistently requested that
+people use the mailing lists and not reach out directly.  If you aren't
+OK with that, then perhaps consider mailing security@kernel.org or
+reading about http://oss-security.openwall.org/wiki/mailing-lists/distros
+as possible alternative mechanisms.
+
+
+Co-posting changes to user space components
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+User space code exercising kernel features should be posted
+alongside kernel patches. This gives reviewers a chance to see
+how any new interface is used and how well it works.
+
+When user space tools reside in the kernel repo itself all changes
+should generally come as one series. If series becomes too large
+or the user space project is not reviewed on netdev include a link
+to a public repo where user space patches can be seen.
+
+In case user space tooling lives in a separate repository but is
+reviewed on netdev  (e.g. patches to ``iproute2`` tools) kernel and
+user space patches should form separate series (threads) when posted
+to the mailing list, e.g.::
+
+  [PATCH net-next 0/3] net: some feature cover letter
+   └─ [PATCH net-next 1/3] net: some feature prep
+   └─ [PATCH net-next 2/3] net: some feature do it
+   └─ [PATCH net-next 3/3] selftest: net: some feature
+
+  [PATCH iproute2-next] ip: add support for some feature
+
+Posting as one thread is discouraged because it confuses patchwork
+(as of patchwork 2.2.2).
+
+Preparing changes
+-----------------
+
+Attention to detail is important.  Re-read your own work as if you were the
+reviewer.  You can start with using ``checkpatch.pl``, perhaps even with
+the ``--strict`` flag.  But do not be mindlessly robotic in doing so.
+If your change is a bug fix, make sure your commit log indicates the
+end-user visible symptom, the underlying reason as to why it happens,
+and then if necessary, explain why the fix proposed is the best way to
+get things done.  Don't mangle whitespace, and as is common, don't
+mis-indent function arguments that span multiple lines.  If it is your
+first patch, mail it to yourself so you can test apply it to an
+unpatched tree to confirm infrastructure didn't mangle it.
+
+Finally, go back and read
+:ref:`Documentation/process/submitting-patches.rst <submittingpatches>`
+to be sure you are not repeating some common mistake documented there.
+
+Indicating target tree
+~~~~~~~~~~~~~~~~~~~~~~
+
+To help maintainers and CI bots you should explicitly mark which tree
+your patch is targeting. Assuming that you use git, use the prefix
+flag::
+
+  git format-patch --subject-prefix='PATCH net-next' start..finish
+
+Use ``net`` instead of ``net-next`` (always lower case) in the above for
+bug-fix ``net`` content.
+
+Dividing work into patches
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Put yourself in the shoes of the reviewer. Each patch is read separately
+and therefore should constitute a comprehensible step towards your stated
+goal.
+
+Avoid sending series longer than 15 patches. Larger series takes longer
+to review as reviewers will defer looking at it until they find a large
+chunk of time. A small series can be reviewed in a short time, so Maintainers
+just do it. As a result, a sequence of smaller series gets merged quicker and
+with better review coverage. Re-posting large series also increases the mailing
+list traffic.
+
+Multi-line comments
+~~~~~~~~~~~~~~~~~~~
+
+Comment style convention is slightly different for networking and most of
+the tree.  Instead of this::
 
   /*
    * foobar blah blah blah
@@ -206,8 +269,8 @@ it is requested that you make it look like this::
    * another line of text
    */
 
-What is "reverse xmas tree"?
-----------------------------
+Local variable ordering ("reverse xmas tree", "RCS")
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
 Netdev has a convention for ordering local variables in functions.
 Order the variable declaration lines longest to shortest, e.g.::
@@ -219,21 +282,31 @@ Order the variable declaration lines longest to shortest, e.g.::
 If there are dependencies between the variables preventing the ordering
 move the initialization out of line.
 
-I am working in existing code which uses non-standard formatting. Which formatting should I use?
-------------------------------------------------------------------------------------------------
-Make your code follow the most recent guidelines, so that eventually all code
+Format precedence
+~~~~~~~~~~~~~~~~~
+
+When working in existing code which uses nonstandard formatting make
+your code follow the most recent guidelines, so that eventually all code
 in the domain of netdev is in the preferred format.
 
-I found a bug that might have possible security implications or similar. Should I mail the main netdev maintainer off-list?
----------------------------------------------------------------------------------------------------------------------------
-No. The current netdev maintainer has consistently requested that
-people use the mailing lists and not reach out directly.  If you aren't
-OK with that, then perhaps consider mailing security@kernel.org or
-reading about http://oss-security.openwall.org/wiki/mailing-lists/distros
-as possible alternative mechanisms.
+Resending after review
+~~~~~~~~~~~~~~~~~~~~~~
+
+Allow at least 24 hours to pass between postings. This will ensure reviewers
+from all geographical locations have a chance to chime in. Do not wait
+too long (weeks) between postings either as it will make it harder for reviewers
+to recall all the context.
+
+Make sure you address all the feedback in your new posting. Do not post a new
+version of the code if the discussion about the previous version is still
+ongoing, unless directly instructed by a reviewer.
+
+Testing
+-------
+
+Expected level of testing
+~~~~~~~~~~~~~~~~~~~~~~~~~
 
-What level of testing is expected before I submit my change?
-------------------------------------------------------------
 At the very minimum your changes must survive an ``allyesconfig`` and an
 ``allmodconfig`` build with ``W=1`` set without new warnings or failures.
 
@@ -244,86 +317,42 @@ and the patch series contains a set of kernel selftest for
 You are expected to test your changes on top of the relevant networking
 tree (``net`` or ``net-next``) and not e.g. a stable tree or ``linux-next``.
 
-How do I post corresponding changes to user space components?
--------------------------------------------------------------
-User space code exercising kernel features should be posted
-alongside kernel patches. This gives reviewers a chance to see
-how any new interface is used and how well it works.
-
-When user space tools reside in the kernel repo itself all changes
-should generally come as one series. If series becomes too large
-or the user space project is not reviewed on netdev include a link
-to a public repo where user space patches can be seen.
-
-In case user space tooling lives in a separate repository but is
-reviewed on netdev  (e.g. patches to ``iproute2`` tools) kernel and
-user space patches should form separate series (threads) when posted
-to the mailing list, e.g.::
-
-  [PATCH net-next 0/3] net: some feature cover letter
-   └─ [PATCH net-next 1/3] net: some feature prep
-   └─ [PATCH net-next 2/3] net: some feature do it
-   └─ [PATCH net-next 3/3] selftest: net: some feature
-
-  [PATCH iproute2-next] ip: add support for some feature
-
-Posting as one thread is discouraged because it confuses patchwork
-(as of patchwork 2.2.2).
-
-Can I reproduce the checks from patchwork on my local machine?
---------------------------------------------------------------
+patchwork checks
+~~~~~~~~~~~~~~~~
 
 Checks in patchwork are mostly simple wrappers around existing kernel
 scripts, the sources are available at:
 
 https://github.com/kuba-moo/nipa/tree/master/tests
 
-Running all the builds and checks locally is a pain, can I post my patches and have the patchwork bot validate them?
---------------------------------------------------------------------------------------------------------------------
-
-No, you must ensure that your patches are ready by testing them locally
+**Do not** post your patches just to run them through the checks.
+You must ensure that your patches are ready by testing them locally
 before posting to the mailing list. The patchwork build bot instance
 gets overloaded very easily and netdev@vger really doesn't need more
 traffic if we can help it.
 
-netdevsim is great, can I extend it for my out-of-tree tests?
--------------------------------------------------------------
+netdevsim
+~~~~~~~~~
 
-No, ``netdevsim`` is a test vehicle solely for upstream tests.
-(Please add your tests under ``tools/testing/selftests/``.)
+``netdevsim`` is a test driver which can be used to exercise driver
+configuration APIs without requiring capable hardware.
+Mock-ups and tests based on ``netdevsim`` are strongly encouraged when
+adding new APIs, but ``netdevsim`` in itself is **not** considered
+a use case/user. You must also implement the new APIs in a real driver.
 
-We also give no guarantees that ``netdevsim`` won't change in the future
+We give no guarantees that ``netdevsim`` won't change in the future
 in a way which would break what would normally be considered uAPI.
 
-Is netdevsim considered a "user" of an API?
--------------------------------------------
-
-Linux kernel has a long standing rule that no API should be added unless
-it has a real, in-tree user. Mock-ups and tests based on ``netdevsim`` are
-strongly encouraged when adding new APIs, but ``netdevsim`` in itself
-is **not** considered a use case/user.
-
-Any other tips to help ensure my net/net-next patch gets OK'd?
---------------------------------------------------------------
-Attention to detail.  Re-read your own work as if you were the
-reviewer.  You can start with using ``checkpatch.pl``, perhaps even with
-the ``--strict`` flag.  But do not be mindlessly robotic in doing so.
-If your change is a bug fix, make sure your commit log indicates the
-end-user visible symptom, the underlying reason as to why it happens,
-and then if necessary, explain why the fix proposed is the best way to
-get things done.  Don't mangle whitespace, and as is common, don't
-mis-indent function arguments that span multiple lines.  If it is your
-first patch, mail it to yourself so you can test apply it to an
-unpatched tree to confirm infrastructure didn't mangle it.
-
-Finally, go back and read
-:ref:`Documentation/process/submitting-patches.rst <submittingpatches>`
-to be sure you are not repeating some common mistake documented there.
+``netdevsim`` is reserved for use by upstream tests only, so any
+new ``netdevsim`` features must be accompanied by selftests under
+``tools/testing/selftests/``.
 
-My company uses peer feedback in employee performance reviews. Can I ask netdev maintainers for feedback?
----------------------------------------------------------------------------------------------------------
+Testimonials / feedback
+-----------------------
 
-Yes, especially if you spend significant amount of time reviewing code
+Some companies use peer feedback in employee performance reviews.
+Please feel free to request feedback from netdev maintainers,
+especially if you spend significant amount of time reviewing code
 and go out of your way to improve shared infrastructure.
 
 The feedback must be requested by you, the contributor, and will always
index eeb394b39e2cc8dc8d8f866a379cf492f5d8b73b..8b416bfd75ac1711d0a6275a5bf84f71c3e40a88 100644 (file)
@@ -3,7 +3,7 @@
 
 import os
 import sys
-from sphinx.util.pycompat import execfile_
+from sphinx.util.osutil import fs_encoding
 
 # ------------------------------------------------------------------------------
 def loadConfig(namespace):
@@ -48,7 +48,9 @@ def loadConfig(namespace):
             sys.stdout.write("load additional sphinx-config: %s\n" % config_file)
             config = namespace.copy()
             config['__file__'] = config_file
-            execfile_(config_file, config)
+            with open(config_file, 'rb') as f:
+                code = compile(f.read(), fs_encoding, 'exec')
+                exec(code, config)
             del config['__file__']
             namespace.update(config)
         else:
index 0dd5d8733dd583ff77a4ef9072831e12e289a3b1..9807b05a1b57109f1051ac5fb24823cdf13fed12 100644 (file)
@@ -1354,6 +1354,14 @@ the memory region are automatically reflected into the guest.  For example, an
 mmap() that affects the region will be made visible immediately.  Another
 example is madvise(MADV_DROP).
 
+Note: On arm64, a write generated by the page-table walker (to update
+the Access and Dirty flags, for example) never results in a
+KVM_EXIT_MMIO exit when the slot has the KVM_MEM_READONLY flag. This
+is because KVM cannot provide the data that would be written by the
+page-table walker, making it impossible to emulate the access.
+Instead, an abort (data abort if the cause of the page-table update
+was a load or a store, instruction abort if it was an instruction
+fetch) is injected in the guest.
 
 4.36 KVM_SET_TSS_ADDR
 ---------------------
@@ -5343,9 +5351,9 @@ KVM_XEN_ATTR_TYPE_SHARED_INFO
   32 vCPUs in the shared_info page, KVM does not automatically do so
   and instead requires that KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO be used
   explicitly even when the vcpu_info for a given vCPU resides at the
-  "default" location in the shared_info page. This is because KVM is
-  not aware of the Xen CPU id which is used as the index into the
-  vcpu_info[] array, so cannot know the correct default location.
+  "default" location in the shared_info page. This is because KVM may
+  not be aware of the Xen CPU id which is used as the index into the
+  vcpu_info[] array, so may not know the correct default location.
 
   Note that the shared info page may be constantly written to by KVM;
   it contains the event channel bitmap used to deliver interrupts to
@@ -5356,23 +5364,29 @@ KVM_XEN_ATTR_TYPE_SHARED_INFO
   any vCPU has been running or any event channel interrupts can be
   routed to the guest.
 
+  Setting the gfn to KVM_XEN_INVALID_GFN will disable the shared info
+  page.
+
 KVM_XEN_ATTR_TYPE_UPCALL_VECTOR
   Sets the exception vector used to deliver Xen event channel upcalls.
   This is the HVM-wide vector injected directly by the hypervisor
   (not through the local APIC), typically configured by a guest via
-  HVM_PARAM_CALLBACK_IRQ.
+  HVM_PARAM_CALLBACK_IRQ. This can be disabled again (e.g. for guest
+  SHUTDOWN_soft_reset) by setting it to zero.
 
 KVM_XEN_ATTR_TYPE_EVTCHN
   This attribute is available when the KVM_CAP_XEN_HVM ioctl indicates
   support for KVM_XEN_HVM_CONFIG_EVTCHN_SEND features. It configures
   an outbound port number for interception of EVTCHNOP_send requests
-  from the guest. A given sending port number may be directed back
-  to a specified vCPU (by APIC ID) / port / priority on the guest,
-  or to trigger events on an eventfd. The vCPU and priority can be
-  changed by setting KVM_XEN_EVTCHN_UPDATE in a subsequent call,
-  but other fields cannot change for a given sending port. A port
-  mapping is removed by using KVM_XEN_EVTCHN_DEASSIGN in the flags
-  field.
+  from the guest. A given sending port number may be directed back to
+  a specified vCPU (by APIC ID) / port / priority on the guest, or to
+  trigger events on an eventfd. The vCPU and priority can be changed
+  by setting KVM_XEN_EVTCHN_UPDATE in a subsequent call, but other
+  fields cannot change for a given sending port. A port mapping is
+  removed by using KVM_XEN_EVTCHN_DEASSIGN in the flags field. Passing
+  KVM_XEN_EVTCHN_RESET in the flags field removes all interception of
+  outbound event channels. The values of the flags field are mutually
+  exclusive and cannot be combined as a bitmask.
 
 KVM_XEN_ATTR_TYPE_XEN_VERSION
   This attribute is available when the KVM_CAP_XEN_HVM ioctl indicates
@@ -5388,7 +5402,7 @@ KVM_XEN_ATTR_TYPE_RUNSTATE_UPDATE_FLAG
   support for KVM_XEN_HVM_CONFIG_RUNSTATE_UPDATE_FLAG. It enables the
   XEN_RUNSTATE_UPDATE flag which allows guest vCPUs to safely read
   other vCPUs' vcpu_runstate_info. Xen guests enable this feature via
-  the VM_ASST_TYPE_runstate_update_flag of the HYPERVISOR_vm_assist
+  the VMASST_TYPE_runstate_update_flag of the HYPERVISOR_vm_assist
   hypercall.
 
 4.127 KVM_XEN_HVM_GET_ATTR
@@ -5446,15 +5460,18 @@ KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO
   As with the shared_info page for the VM, the corresponding page may be
   dirtied at any time if event channel interrupt delivery is enabled, so
   userspace should always assume that the page is dirty without relying
-  on dirty logging.
+  on dirty logging. Setting the gpa to KVM_XEN_INVALID_GPA will disable
+  the vcpu_info.
 
 KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO
   Sets the guest physical address of an additional pvclock structure
   for a given vCPU. This is typically used for guest vsyscall support.
+  Setting the gpa to KVM_XEN_INVALID_GPA will disable the structure.
 
 KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADDR
   Sets the guest physical address of the vcpu_runstate_info for a given
   vCPU. This is how a Xen guest tracks CPU state such as steal time.
+  Setting the gpa to KVM_XEN_INVALID_GPA will disable the runstate area.
 
 KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_CURRENT
   Sets the runstate (RUNSTATE_running/_runnable/_blocked/_offline) of
@@ -5487,7 +5504,8 @@ KVM_XEN_VCPU_ATTR_TYPE_TIMER
   This attribute is available when the KVM_CAP_XEN_HVM ioctl indicates
   support for KVM_XEN_HVM_CONFIG_EVTCHN_SEND features. It sets the
   event channel port/priority for the VIRQ_TIMER of the vCPU, as well
-  as allowing a pending timer to be saved/restored.
+  as allowing a pending timer to be saved/restored. Setting the timer
+  port to zero disables kernel handling of the singleshot timer.
 
 KVM_XEN_VCPU_ATTR_TYPE_UPCALL_VECTOR
   This attribute is available when the KVM_CAP_XEN_HVM ioctl indicates
@@ -5495,7 +5513,8 @@ KVM_XEN_VCPU_ATTR_TYPE_UPCALL_VECTOR
   per-vCPU local APIC upcall vector, configured by a Xen guest with
   the HVMOP_set_evtchn_upcall_vector hypercall. This is typically
   used by Windows guests, and is distinct from the HVM-wide upcall
-  vector configured with HVM_PARAM_CALLBACK_IRQ.
+  vector configured with HVM_PARAM_CALLBACK_IRQ. It is disabled by
+  setting the vector to zero.
 
 
 4.129 KVM_XEN_VCPU_GET_ATTR
@@ -6577,11 +6596,6 @@ Please note that the kernel is allowed to use the kvm_run structure as the
 primary storage for certain register types. Therefore, the kernel may use the
 values in kvm_run even if the corresponding bit in kvm_dirty_regs is not set.
 
-::
-
-  };
-
-
 
 6. Capabilities that can be enabled on vCPUs
 ============================================
@@ -8304,6 +8318,20 @@ CPU[EAX=1]:ECX[24] (TSC_DEADLINE) is not reported by ``KVM_GET_SUPPORTED_CPUID``
 It can be enabled if ``KVM_CAP_TSC_DEADLINE_TIMER`` is present and the kernel
 has enabled in-kernel emulation of the local APIC.
 
+CPU topology
+~~~~~~~~~~~~
+
+Several CPUID values include topology information for the host CPU:
+0x0b and 0x1f for Intel systems, 0x8000001e for AMD systems.  Different
+versions of KVM return different values for this information and userspace
+should not rely on it.  Currently they return all zeroes.
+
+If userspace wishes to set up a guest topology, it should be careful that
+the values of these three leaves differ for each CPU.  In particular,
+the APIC ID is found in EDX for all subleaves of 0x0b and 0x1f, and in EAX
+for 0x8000001e; the latter also encodes the core id and node id in bits
+7:0 of EBX and ECX respectively.
+
 Obsolete ioctls and capabilities
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
index 845a561629f19ddf236d13ec27c8df1ce9029637..a0146793d1972daabb5bb424ae07484c0c5188e7 100644 (file)
@@ -16,20 +16,30 @@ The acquisition orders for mutexes are as follows:
 - kvm->slots_lock is taken outside kvm->irq_lock, though acquiring
   them together is quite rare.
 
-- Unlike kvm->slots_lock, kvm->slots_arch_lock is released before
-  synchronize_srcu(&kvm->srcu).  Therefore kvm->slots_arch_lock
-  can be taken inside a kvm->srcu read-side critical section,
-  while kvm->slots_lock cannot.
-
 - kvm->mn_active_invalidate_count ensures that pairs of
   invalidate_range_start() and invalidate_range_end() callbacks
   use the same memslots array.  kvm->slots_lock and kvm->slots_arch_lock
   are taken on the waiting side in install_new_memslots, so MMU notifiers
   must not take either kvm->slots_lock or kvm->slots_arch_lock.
 
+For SRCU:
+
+- ``synchronize_srcu(&kvm->srcu)`` is called inside critical sections
+  for kvm->lock, vcpu->mutex and kvm->slots_lock.  These locks _cannot_
+  be taken inside a kvm->srcu read-side critical section; that is, the
+  following is broken::
+
+      srcu_read_lock(&kvm->srcu);
+      mutex_lock(&kvm->slots_lock);
+
+- kvm->slots_arch_lock instead is released before the call to
+  ``synchronize_srcu()``.  It _can_ therefore be taken inside a
+  kvm->srcu read-side critical section, for example while processing
+  a vmexit.
+
 On x86:
 
-- vcpu->mutex is taken outside kvm->arch.hyperv.hv_lock
+- vcpu->mutex is taken outside kvm->arch.hyperv.hv_lock and kvm->arch.xen.xen_lock
 
 - kvm->arch.mmu_lock is an rwlock.  kvm->arch.tdp_mmu_pages_lock and
   kvm->arch.mmu_unsync_pages_lock are taken inside kvm->arch.mmu_lock, and
index 5dce1c45f4d1abc1c88b10ea3785a4f3024cf08f..673f105bb2ce3aa6c5e363125969ae9fcb6c14ba 100644 (file)
@@ -383,7 +383,7 @@ ACPI COMPONENT ARCHITECTURE (ACPICA)
 M:     Robert Moore <robert.moore@intel.com>
 M:     "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
 L:     linux-acpi@vger.kernel.org
-L:     devel@acpica.org
+L:     acpica-devel@lists.linuxfoundation.org
 S:     Supported
 W:     https://acpica.org/
 W:     https://github.com/acpica/acpica/
@@ -1097,14 +1097,12 @@ S:      Maintained
 F:     drivers/dma/ptdma/
 
 AMD SEATTLE DEVICE TREE SUPPORT
-M:     Brijesh Singh <brijeshkumar.singh@amd.com>
 M:     Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
 M:     Tom Lendacky <thomas.lendacky@amd.com>
 S:     Supported
 F:     arch/arm64/boot/dts/amd/
 
 AMD XGBE DRIVER
-M:     Tom Lendacky <thomas.lendacky@amd.com>
 M:     "Shyam Sundar S K" <Shyam-sundar.S-k@amd.com>
 L:     netdev@vger.kernel.org
 S:     Supported
@@ -6951,7 +6949,7 @@ F:        drivers/gpu/drm/atmel-hlcdc/
 DRM DRIVERS FOR BRIDGE CHIPS
 M:     Andrzej Hajda <andrzej.hajda@intel.com>
 M:     Neil Armstrong <neil.armstrong@linaro.org>
-M:     Robert Foss <robert.foss@linaro.org>
+M:     Robert Foss <rfoss@kernel.org>
 R:     Laurent Pinchart <Laurent.pinchart@ideasonboard.com>
 R:     Jonas Karlman <jonas@kwiboo.se>
 R:     Jernej Skrabec <jernej.skrabec@gmail.com>
@@ -9302,7 +9300,7 @@ F:        net/dsa/tag_hellcreek.c
 
 HISILICON DMA DRIVER
 M:     Zhou Wang <wangzhou1@hisilicon.com>
-M:     Jie Hai <haijie1@hisilicon.com>
+M:     Jie Hai <haijie1@huawei.com>
 L:     dmaengine@vger.kernel.org
 S:     Maintained
 F:     drivers/dma/hisi_dma.c
@@ -11359,9 +11357,9 @@ F:      virt/kvm/*
 KERNEL VIRTUAL MACHINE FOR ARM64 (KVM/arm64)
 M:     Marc Zyngier <maz@kernel.org>
 R:     James Morse <james.morse@arm.com>
-R:     Alexandru Elisei <alexandru.elisei@arm.com>
 R:     Suzuki K Poulose <suzuki.poulose@arm.com>
 R:     Oliver Upton <oliver.upton@linux.dev>
+R:     Zenghui Yu <yuzenghui@huawei.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L:     kvmarm@lists.linux.dev
 L:     kvmarm@lists.cs.columbia.edu (deprecated, moderated for non-subscribers)
@@ -11471,7 +11469,7 @@ F:      arch/x86/kvm/hyperv.*
 F:     arch/x86/kvm/kvm_onhyperv.*
 F:     arch/x86/kvm/svm/hyperv.*
 F:     arch/x86/kvm/svm/svm_onhyperv.*
-F:     arch/x86/kvm/vmx/evmcs.*
+F:     arch/x86/kvm/vmx/hyperv.*
 
 KVM X86 Xen (KVM/Xen)
 M:     David Woodhouse <dwmw2@infradead.org>
@@ -13623,7 +13621,7 @@ F:      arch/microblaze/
 
 MICROCHIP AT91 DMA DRIVERS
 M:     Ludovic Desroches <ludovic.desroches@microchip.com>
-M:     Tudor Ambarus <tudor.ambarus@microchip.com>
+M:     Tudor Ambarus <tudor.ambarus@linaro.org>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L:     dmaengine@vger.kernel.org
 S:     Supported
@@ -13668,7 +13666,7 @@ F:      Documentation/devicetree/bindings/media/microchip,csi2dc.yaml
 F:     drivers/media/platform/microchip/microchip-csi2dc.c
 
 MICROCHIP ECC DRIVER
-M:     Tudor Ambarus <tudor.ambarus@microchip.com>
+M:     Tudor Ambarus <tudor.ambarus@linaro.org>
 L:     linux-crypto@vger.kernel.org
 S:     Maintained
 F:     drivers/crypto/atmel-ecc.*
@@ -13765,7 +13763,7 @@ S:      Maintained
 F:     drivers/mmc/host/atmel-mci.c
 
 MICROCHIP NAND DRIVER
-M:     Tudor Ambarus <tudor.ambarus@microchip.com>
+M:     Tudor Ambarus <tudor.ambarus@linaro.org>
 L:     linux-mtd@lists.infradead.org
 S:     Supported
 F:     Documentation/devicetree/bindings/mtd/atmel-nand.txt
@@ -13817,7 +13815,7 @@ S:      Supported
 F:     drivers/power/reset/at91-sama5d2_shdwc.c
 
 MICROCHIP SPI DRIVER
-M:     Tudor Ambarus <tudor.ambarus@microchip.com>
+M:     Tudor Ambarus <tudor.ambarus@linaro.org>
 S:     Supported
 F:     drivers/spi/spi-atmel.*
 
@@ -14919,9 +14917,11 @@ L:     linux-nvme@lists.infradead.org
 S:     Supported
 W:     http://git.infradead.org/nvme.git
 T:     git://git.infradead.org/nvme.git
+F:     Documentation/nvme/
 F:     drivers/nvme/host/
 F:     drivers/nvme/common/
-F:     include/linux/nvme*
+F:     include/linux/nvme.h
+F:     include/linux/nvme-*.h
 F:     include/uapi/linux/nvme_ioctl.h
 
 NVM EXPRESS FABRICS AUTHENTICATION
@@ -15751,6 +15751,12 @@ S:     Maintained
 W:     https://wireless.wiki.kernel.org/en/users/Drivers/p54
 F:     drivers/net/wireless/intersil/p54/
 
+PACKET SOCKETS
+M:     Willem de Bruijn <willemdebruijn.kernel@gmail.com>
+S:     Maintained
+F:     include/uapi/linux/if_packet.h
+F:     net/packet/af_packet.c
+
 PACKING
 M:     Vladimir Oltean <olteanv@gmail.com>
 L:     netdev@vger.kernel.org
@@ -16612,6 +16618,13 @@ S:     Supported
 F:     Documentation/devicetree/bindings/input/pine64,pinephone-keyboard.yaml
 F:     drivers/input/keyboard/pinephone-keyboard.c
 
+PKTCDVD DRIVER
+M:     linux-block@vger.kernel.org
+S:     Orphan
+F:     drivers/block/pktcdvd.c
+F:     include/linux/pktcdvd.h
+F:     include/uapi/linux/pktcdvd.h
+
 PLANTOWER PMS7003 AIR POLLUTION SENSOR DRIVER
 M:     Tomasz Duszynski <tduszyns@gmail.com>
 S:     Maintained
@@ -17232,7 +17245,7 @@ F:      Documentation/devicetree/bindings/net/qcom,bam-dmux.yaml
 F:     drivers/net/wwan/qcom_bam_dmux.c
 
 QUALCOMM CAMERA SUBSYSTEM DRIVER
-M:     Robert Foss <robert.foss@linaro.org>
+M:     Robert Foss <rfoss@kernel.org>
 M:     Todor Tomov <todor.too@gmail.com>
 L:     linux-media@vger.kernel.org
 S:     Maintained
@@ -17312,7 +17325,7 @@ F:      drivers/dma/qcom/hidma*
 
 QUALCOMM I2C CCI DRIVER
 M:     Loic Poulain <loic.poulain@linaro.org>
-M:     Robert Foss <robert.foss@linaro.org>
+M:     Robert Foss <rfoss@kernel.org>
 L:     linux-i2c@vger.kernel.org
 L:     linux-arm-msm@vger.kernel.org
 S:     Maintained
@@ -19320,6 +19333,13 @@ L:     alsa-devel@alsa-project.org (moderated for non-subscribers)
 S:     Orphan
 F:     sound/soc/uniphier/
 
+SOCKET TIMESTAMPING
+M:     Willem de Bruijn <willemdebruijn.kernel@gmail.com>
+S:     Maintained
+F:     Documentation/networking/timestamping.rst
+F:     include/uapi/linux/net_tstamp.h
+F:     tools/testing/selftests/net/so_txtime.c
+
 SOEKRIS NET48XX LED SUPPORT
 M:     Chris Boot <bootc@bootc.net>
 S:     Maintained
@@ -19667,7 +19687,7 @@ F:      drivers/clk/spear/
 F:     drivers/pinctrl/spear/
 
 SPI NOR SUBSYSTEM
-M:     Tudor Ambarus <tudor.ambarus@microchip.com>
+M:     Tudor Ambarus <tudor.ambarus@linaro.org>
 M:     Pratyush Yadav <pratyush@kernel.org>
 R:     Michael Walle <michael@walle.cc>
 L:     linux-mtd@lists.infradead.org
@@ -21740,6 +21760,13 @@ T:     git git://linuxtv.org/media_tree.git
 F:     Documentation/admin-guide/media/zr364xx*
 F:     drivers/staging/media/deprecated/zr364xx/
 
+USER DATAGRAM PROTOCOL (UDP)
+M:     Willem de Bruijn <willemdebruijn.kernel@gmail.com>
+S:     Maintained
+F:     include/linux/udp.h
+F:     net/ipv4/udp.c
+F:     net/ipv6/udp.c
+
 USER-MODE LINUX (UML)
 M:     Richard Weinberger <richard@nod.at>
 M:     Anton Ivanov <anton.ivanov@cambridgegreys.com>
@@ -22248,7 +22275,9 @@ F:      drivers/scsi/vmw_pvscsi.c
 F:     drivers/scsi/vmw_pvscsi.h
 
 VMWARE VIRTUAL PTP CLOCK DRIVER
-M:     Vivek Thampi <vithampi@vmware.com>
+M:     Srivatsa S. Bhat (VMware) <srivatsa@csail.mit.edu>
+M:     Deep Shah <sdeep@vmware.com>
+R:     Alexey Makhalov <amakhalov@vmware.com>
 R:     VMware PV-Drivers Reviewers <pv-drivers@vmware.com>
 L:     netdev@vger.kernel.org
 S:     Supported
index d4b6af8c09e9c1a9d85470c2b021dd57649b7e26..c1ead4cd2342a03197e4fdd6a2545abc0dc5b9fd 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 6
 PATCHLEVEL = 2
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc5
 NAME = Hurr durr I'ma ninja sloth
 
 # *DOCUMENTATION*
@@ -297,7 +297,7 @@ no-compiler-targets := $(no-dot-config-targets) install dtbs_install \
                        headers_install modules_install kernelrelease image_name
 no-sync-config-targets := $(no-dot-config-targets) %install kernelrelease \
                          image_name
-single-targets := %.a %.i %.rsi %.ko %.lds %.ll %.lst %.mod %.o %.s %.symtypes %/
+single-targets := %.a %.i %.ko %.lds %.ll %.lst %.mod %.o %.rsi %.s %.symtypes %/
 
 config-build   :=
 mixed-build    :=
@@ -549,7 +549,7 @@ LDFLAGS_MODULE  =
 CFLAGS_KERNEL  =
 RUSTFLAGS_KERNEL =
 AFLAGS_KERNEL  =
-export LDFLAGS_vmlinux =
+LDFLAGS_vmlinux =
 
 # Use USERINCLUDE when you must reference the UAPI directories only.
 USERINCLUDE    := \
@@ -1248,6 +1248,18 @@ vmlinux.o modules.builtin.modinfo modules.builtin: vmlinux_o
        @:
 
 PHONY += vmlinux
+# LDFLAGS_vmlinux in the top Makefile defines linker flags for the top vmlinux,
+# not for decompressors. LDFLAGS_vmlinux in arch/*/boot/compressed/Makefile is
+# unrelated; the decompressors just happen to have the same base name,
+# arch/*/boot/compressed/vmlinux.
+# Export LDFLAGS_vmlinux only to scripts/Makefile.vmlinux.
+#
+# _LDFLAGS_vmlinux is a workaround for the 'private export' bug:
+#   https://savannah.gnu.org/bugs/?61463
+# For Make > 4.4, the following simple code will work:
+#  vmlinux: private export LDFLAGS_vmlinux := $(LDFLAGS_vmlinux)
+vmlinux: private _LDFLAGS_vmlinux := $(LDFLAGS_vmlinux)
+vmlinux: export LDFLAGS_vmlinux = $(_LDFLAGS_vmlinux)
 vmlinux: vmlinux.o $(KBUILD_LDS) modpost
        $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.vmlinux
 
@@ -1533,6 +1545,7 @@ endif
 # *.ko are usually independent of vmlinux, but CONFIG_DEBUG_INFOBTF_MODULES
 # is an exception.
 ifdef CONFIG_DEBUG_INFO_BTF_MODULES
+KBUILD_BUILTIN := 1
 modules: vmlinux
 endif
 
@@ -1986,7 +1999,7 @@ $(single-no-ko): $(build-dir)
 # Remove MODORDER when done because it is not the real one.
 PHONY += single_modules
 single_modules: $(single-no-ko) modules_prepare
-       $(Q){ $(foreach m, $(single-ko), echo $(extmod_prefix)$m;) } > $(MODORDER)
+       $(Q){ $(foreach m, $(single-ko), echo $(extmod_prefix)$(m:%.ko=%.o);) } > $(MODORDER)
        $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
 ifneq ($(KBUILD_MODPOST_NOFINAL),1)
        $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modfinal
index 12933eff419ff877366f804e0488b666a5121230..446861b6b17b2ae628a42a4a32dca94a0b5b1f21 100644 (file)
                        };
 
                        gpio0: gpio@18100 {
-                               compatible = "marvell,armadaxp-gpio",
+                               compatible = "marvell,armada-370-gpio",
                                             "marvell,orion-gpio";
                                reg = <0x18100 0x40>, <0x181c0 0x08>;
                                reg-names = "gpio", "pwm";
                        };
 
                        gpio1: gpio@18140 {
-                               compatible = "marvell,armadaxp-gpio",
+                               compatible = "marvell,armada-370-gpio",
                                             "marvell,orion-gpio";
                                reg = <0x18140 0x40>, <0x181c8 0x08>;
                                reg-names = "gpio", "pwm";
index 1e05208d9f3415f3ee5cd72084a69bf4460d9f8c..9d1cac49c022f07c315efabc57c5f08158c2833a 100644 (file)
                        };
 
                        gpio0: gpio@18100 {
-                               compatible = "marvell,armadaxp-gpio", "marvell,orion-gpio";
+                               compatible = "marvell,orion-gpio";
                                reg = <0x18100 0x40>;
                                ngpios = <32>;
                                gpio-controller;
                        };
 
                        gpio1: gpio@18140 {
-                               compatible = "marvell,armadaxp-gpio", "marvell,orion-gpio";
+                               compatible = "marvell,orion-gpio";
                                reg = <0x18140 0x40>;
                                ngpios = <28>;
                                gpio-controller;
index d1971ddf06a5d4e18b2e80dffe2b8fb4c7bce8b1..7f755e5a4624df2b3a85966b3a0bd269478b6c9a 100644 (file)
        };
 
        pca9849@75 {
-               compatible = "nxp,pca849";
+               compatible = "nxp,pca9849";
                reg = <0x75>;
                #address-cells = <1>;
                #size-cells = <0>;
index 44cd72f1b1be4795e391d63c5088d8f00b54fa92..116e59a3b76d01e54306ced831dfbc2019b05754 100644 (file)
                serial@f995e000 {
                        status = "okay";
                };
+       };
+};
 
-               sdhci@f9824900 {
-                       bus-width = <8>;
-                       non-removable;
-                       status = "okay";
-               };
+&sdhc_1 {
+       bus-width = <8>;
+       non-removable;
+       status = "okay";
+};
 
-               sdhci@f98a4900 {
-                       cd-gpios = <&tlmm 122 GPIO_ACTIVE_LOW>;
-                       bus-width = <4>;
-               };
-       };
+&sdhc_2 {
+       cd-gpios = <&tlmm 122 GPIO_ACTIVE_LOW>;
+       bus-width = <4>;
 };
index fe30abfff90a77d10f51ca48105da0bbb7ca5944..4b0d2b4f4b6a9017352eb0b847126c3b723151d2 100644 (file)
                        status = "disabled";
                };
 
-               mmc@f9824900 {
+               sdhc_1: mmc@f9824900 {
                        compatible = "qcom,apq8084-sdhci", "qcom,sdhci-msm-v4";
                        reg = <0xf9824900 0x11c>, <0xf9824000 0x800>;
                        reg-names = "hc", "core";
                        status = "disabled";
                };
 
-               mmc@f98a4900 {
+               sdhc_2: mmc@f98a4900 {
                        compatible = "qcom,apq8084-sdhci", "qcom,sdhci-msm-v4";
                        reg = <0xf98a4900 0x11c>, <0xf98a4000 0x800>;
                        reg-names = "hc", "core";
index 8f5477e307dd49e444d66289663f03048b59b5e2..37a5d96aaf6421a854b96a7d04efc1b54bd11254 100644 (file)
                        mpddrc: mpddrc@ffffe800 {
                                compatible = "microchip,sam9x60-ddramc", "atmel,sama5d3-ddramc";
                                reg = <0xffffe800 0x200>;
-                               clocks = <&pmc PMC_TYPE_SYSTEM 2>, <&pmc PMC_TYPE_CORE PMC_MCK>;
+                               clocks = <&pmc PMC_TYPE_SYSTEM 2>, <&pmc PMC_TYPE_PERIPHERAL 49>;
                                clock-names = "ddrck", "mpddr";
                        };
 
index d865ab5d866b93bf16eac00e33080cecf40041cc..dd23de85100c483386d35307fc93f1e03c987982 100644 (file)
 
 &qspi {
        pinctrl-names = "default", "sleep";
-       pinctrl-0 = <&qspi_clk_pins_a &qspi_bk1_pins_a>;
-       pinctrl-1 = <&qspi_clk_sleep_pins_a &qspi_bk1_sleep_pins_a>;
+       pinctrl-0 = <&qspi_clk_pins_a
+                    &qspi_bk1_pins_a
+                    &qspi_cs1_pins_a>;
+       pinctrl-1 = <&qspi_clk_sleep_pins_a
+                    &qspi_bk1_sleep_pins_a
+                    &qspi_cs1_sleep_pins_a>;
        reg = <0x58003000 0x1000>, <0x70000000 0x4000000>;
        #address-cells = <1>;
        #size-cells = <0>;
index aef02e6421a36ddf93a975b0798fb92c4326b5d1..7d11c50b9e4087afa226adc052449c3a38b3dd6e 100644 (file)
 
 &qspi {
        pinctrl-names = "default", "sleep";
-       pinctrl-0 = <&qspi_clk_pins_a &qspi_bk1_pins_a>;
-       pinctrl-1 = <&qspi_clk_sleep_pins_a &qspi_bk1_sleep_pins_a>;
+       pinctrl-0 = <&qspi_clk_pins_a
+                    &qspi_bk1_pins_a
+                    &qspi_cs1_pins_a>;
+       pinctrl-1 = <&qspi_clk_sleep_pins_a
+                    &qspi_bk1_sleep_pins_a
+                    &qspi_cs1_sleep_pins_a>;
        reg = <0x58003000 0x1000>, <0x70000000 0x4000000>;
        #address-cells = <1>;
        #size-cells = <0>;
index 002f221f1694c79c70aba0d9d9e120c006de0a64..c06edd2eacb0cd3f35850f0247bda1dedab4f718 100644 (file)
 
 &qspi {
        pinctrl-names = "default", "sleep";
-       pinctrl-0 = <&qspi_clk_pins_a &qspi_bk1_pins_a>;
-       pinctrl-1 = <&qspi_clk_sleep_pins_a &qspi_bk1_sleep_pins_a>;
+       pinctrl-0 = <&qspi_clk_pins_a
+                    &qspi_bk1_pins_a
+                    &qspi_cs1_pins_a>;
+       pinctrl-1 = <&qspi_clk_sleep_pins_a
+                    &qspi_bk1_sleep_pins_a
+                    &qspi_cs1_sleep_pins_a>;
        reg = <0x58003000 0x1000>, <0x70000000 0x4000000>;
        #address-cells = <1>;
        #size-cells = <0>;
index 134a798ad3f23f628e8673fadaf58b777cf64e41..bb40fb46da81d559084f680c11c5c072efa83073 100644 (file)
 
 &qspi {
        pinctrl-names = "default", "sleep";
-       pinctrl-0 = <&qspi_clk_pins_a &qspi_bk1_pins_a>;
-       pinctrl-1 = <&qspi_clk_sleep_pins_a &qspi_bk1_sleep_pins_a>;
+       pinctrl-0 = <&qspi_clk_pins_a
+                    &qspi_bk1_pins_a
+                    &qspi_cs1_pins_a>;
+       pinctrl-1 = <&qspi_clk_sleep_pins_a
+                    &qspi_bk1_sleep_pins_a
+                    &qspi_cs1_sleep_pins_a>;
        reg = <0x58003000 0x1000>, <0x70000000 0x200000>;
        #address-cells = <1>;
        #size-cells = <0>;
index aecc403b2880493d6925eed075bf405b77ee03cb..7f092cb55a4171548da8e4df3c72708512df9264 100644 (file)
@@ -128,15 +128,16 @@ extern int vfp_restore_user_hwstate(struct user_vfp *,
 #define TIF_NEED_RESCHED       1       /* rescheduling necessary */
 #define TIF_NOTIFY_RESUME      2       /* callback before returning to user */
 #define TIF_UPROBE             3       /* breakpointed or singlestepping */
-#define TIF_SYSCALL_TRACE      4       /* syscall trace active */
-#define TIF_SYSCALL_AUDIT      5       /* syscall auditing active */
-#define TIF_SYSCALL_TRACEPOINT 6       /* syscall tracepoint instrumentation */
-#define TIF_SECCOMP            7       /* seccomp syscall filtering active */
-#define TIF_NOTIFY_SIGNAL      8       /* signal notifications exist */
+#define TIF_NOTIFY_SIGNAL      4       /* signal notifications exist */
 
 #define TIF_USING_IWMMXT       17
 #define TIF_MEMDIE             18      /* is terminating due to OOM killer */
-#define TIF_RESTORE_SIGMASK    20
+#define TIF_RESTORE_SIGMASK    19
+#define TIF_SYSCALL_TRACE      20      /* syscall trace active */
+#define TIF_SYSCALL_AUDIT      21      /* syscall auditing active */
+#define TIF_SYSCALL_TRACEPOINT 22      /* syscall tracepoint instrumentation */
+#define TIF_SECCOMP            23      /* seccomp syscall filtering active */
+
 
 #define _TIF_SIGPENDING                (1 << TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED      (1 << TIF_NEED_RESCHED)
index b8f741a3a37ea02cee2558f951eb9bf297936168..237b828dd2f10a7d99f910edb262e14e90c3e050 100644 (file)
@@ -20,7 +20,6 @@
 
 #include <linux/init.h>
 #include <linux/mc146818rtc.h>
-#include <linux/bcd.h>
 #include <linux/io.h>
 
 #include "common.h"
index 538a960257cc70602f59ee7a07215d7adb838e10..7ec7ada287e050692ee7421a87a1c3b7865cd8c9 100644 (file)
@@ -4,6 +4,7 @@ menuconfig ARCH_OMAP1
        depends on ARCH_MULTI_V4T || ARCH_MULTI_V5
        depends on CPU_LITTLE_ENDIAN
        depends on ATAGS
+       select ARCH_OMAP
        select ARCH_HAS_HOLES_MEMORYMODEL
        select ARCH_OMAP
        select CLKSRC_MMIO
@@ -45,10 +46,6 @@ config ARCH_OMAP16XX
        select CPU_ARM926T
        select OMAP_DM_TIMER
 
-config ARCH_OMAP1_ANY
-       select ARCH_OMAP
-       def_bool ARCH_OMAP730 || ARCH_OMAP850 || ARCH_OMAP15XX || ARCH_OMAP16XX
-
 config ARCH_OMAP
        bool
 
index 506074b86333fd67d0a705378eea2eab89a4b8b3..0615cb0ba580b03b57c78887dcf24e02e37b0e85 100644 (file)
@@ -3,8 +3,6 @@
 # Makefile for the linux kernel.
 #
 
-ifdef CONFIG_ARCH_OMAP1_ANY
-
 # Common support
 obj-y := io.o id.o sram-init.o sram.o time.o irq.o mux.o flash.o \
         serial.o devices.o dma.o omap-dma.o fb.o
@@ -59,5 +57,3 @@ obj-$(CONFIG_ARCH_OMAP730)            += gpio7xx.o
 obj-$(CONFIG_ARCH_OMAP850)             += gpio7xx.o
 obj-$(CONFIG_ARCH_OMAP15XX)            += gpio15xx.o
 obj-$(CONFIG_ARCH_OMAP16XX)            += gpio16xx.o
-
-endif
index c675f11de99dbdf5e30da119bcc53098e18a5f6d..61fa26efd865340312e12b6fc914c85fbcb6df17 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/gpio.h>
 #include <linux/platform_data/gpio-omap.h>
 #include <linux/soc/ti/omap1-soc.h>
+#include <asm/irq.h>
 
 #include "irqs.h"
 
index d2db9b8aed3fb016b6cbecda12a0ace8c351bf13..0074b011a05a46b34c56ddc0a72e301c9b602aef 100644 (file)
  * The machine specific code may provide the extra mapping besides the
  * default mapping provided here.
  */
-static struct map_desc omap_io_desc[] __initdata = {
+#if defined (CONFIG_ARCH_OMAP730) || defined (CONFIG_ARCH_OMAP850)
+static struct map_desc omap7xx_io_desc[] __initdata = {
        {
                .virtual        = OMAP1_IO_VIRT,
                .pfn            = __phys_to_pfn(OMAP1_IO_PHYS),
                .length         = OMAP1_IO_SIZE,
                .type           = MT_DEVICE
-       }
-};
-
-#if defined (CONFIG_ARCH_OMAP730) || defined (CONFIG_ARCH_OMAP850)
-static struct map_desc omap7xx_io_desc[] __initdata = {
+       },
        {
                .virtual        = OMAP7XX_DSP_BASE,
                .pfn            = __phys_to_pfn(OMAP7XX_DSP_START),
@@ -49,6 +46,12 @@ static struct map_desc omap7xx_io_desc[] __initdata = {
 
 #ifdef CONFIG_ARCH_OMAP15XX
 static struct map_desc omap1510_io_desc[] __initdata = {
+       {
+               .virtual        = OMAP1_IO_VIRT,
+               .pfn            = __phys_to_pfn(OMAP1_IO_PHYS),
+               .length         = OMAP1_IO_SIZE,
+               .type           = MT_DEVICE
+       },
        {
                .virtual        = OMAP1510_DSP_BASE,
                .pfn            = __phys_to_pfn(OMAP1510_DSP_START),
@@ -65,6 +68,12 @@ static struct map_desc omap1510_io_desc[] __initdata = {
 
 #if defined(CONFIG_ARCH_OMAP16XX)
 static struct map_desc omap16xx_io_desc[] __initdata = {
+       {
+               .virtual        = OMAP1_IO_VIRT,
+               .pfn            = __phys_to_pfn(OMAP1_IO_PHYS),
+               .length         = OMAP1_IO_SIZE,
+               .type           = MT_DEVICE
+       },
        {
                .virtual        = OMAP16XX_DSP_BASE,
                .pfn            = __phys_to_pfn(OMAP16XX_DSP_START),
@@ -79,18 +88,9 @@ static struct map_desc omap16xx_io_desc[] __initdata = {
 };
 #endif
 
-/*
- * Maps common IO regions for omap1
- */
-static void __init omap1_map_common_io(void)
-{
-       iotable_init(omap_io_desc, ARRAY_SIZE(omap_io_desc));
-}
-
 #if defined (CONFIG_ARCH_OMAP730) || defined (CONFIG_ARCH_OMAP850)
 void __init omap7xx_map_io(void)
 {
-       omap1_map_common_io();
        iotable_init(omap7xx_io_desc, ARRAY_SIZE(omap7xx_io_desc));
 }
 #endif
@@ -98,7 +98,6 @@ void __init omap7xx_map_io(void)
 #ifdef CONFIG_ARCH_OMAP15XX
 void __init omap15xx_map_io(void)
 {
-       omap1_map_common_io();
        iotable_init(omap1510_io_desc, ARRAY_SIZE(omap1510_io_desc));
 }
 #endif
@@ -106,7 +105,6 @@ void __init omap15xx_map_io(void)
 #if defined(CONFIG_ARCH_OMAP16XX)
 void __init omap16xx_map_io(void)
 {
-       omap1_map_common_io();
        iotable_init(omap16xx_io_desc, ARRAY_SIZE(omap16xx_io_desc));
 }
 #endif
index 05c25c432449fd8f2a0f609ae644d16aecf3e379..b1632cbe37e6f93d327ad79cbba1c62ce6687d86 100644 (file)
@@ -89,7 +89,6 @@ static struct omap_mcbsp_ops omap1_mcbsp_ops = {
 #define OMAP1610_MCBSP2_BASE   0xfffb1000
 #define OMAP1610_MCBSP3_BASE   0xe1017000
 
-#if defined(CONFIG_ARCH_OMAP730) || defined(CONFIG_ARCH_OMAP850)
 struct resource omap7xx_mcbsp_res[][6] = {
        {
                {
@@ -159,14 +158,7 @@ static struct omap_mcbsp_platform_data omap7xx_mcbsp_pdata[] = {
 };
 #define OMAP7XX_MCBSP_RES_SZ           ARRAY_SIZE(omap7xx_mcbsp_res[1])
 #define OMAP7XX_MCBSP_COUNT            ARRAY_SIZE(omap7xx_mcbsp_res)
-#else
-#define omap7xx_mcbsp_res_0            NULL
-#define omap7xx_mcbsp_pdata            NULL
-#define OMAP7XX_MCBSP_RES_SZ           0
-#define OMAP7XX_MCBSP_COUNT            0
-#endif
 
-#ifdef CONFIG_ARCH_OMAP15XX
 struct resource omap15xx_mcbsp_res[][6] = {
        {
                {
@@ -266,14 +258,7 @@ static struct omap_mcbsp_platform_data omap15xx_mcbsp_pdata[] = {
 };
 #define OMAP15XX_MCBSP_RES_SZ          ARRAY_SIZE(omap15xx_mcbsp_res[1])
 #define OMAP15XX_MCBSP_COUNT           ARRAY_SIZE(omap15xx_mcbsp_res)
-#else
-#define omap15xx_mcbsp_res_0           NULL
-#define omap15xx_mcbsp_pdata           NULL
-#define OMAP15XX_MCBSP_RES_SZ          0
-#define OMAP15XX_MCBSP_COUNT           0
-#endif
 
-#ifdef CONFIG_ARCH_OMAP16XX
 struct resource omap16xx_mcbsp_res[][6] = {
        {
                {
@@ -373,12 +358,6 @@ static struct omap_mcbsp_platform_data omap16xx_mcbsp_pdata[] = {
 };
 #define OMAP16XX_MCBSP_RES_SZ          ARRAY_SIZE(omap16xx_mcbsp_res[1])
 #define OMAP16XX_MCBSP_COUNT           ARRAY_SIZE(omap16xx_mcbsp_res)
-#else
-#define omap16xx_mcbsp_res_0           NULL
-#define omap16xx_mcbsp_pdata           NULL
-#define OMAP16XX_MCBSP_RES_SZ          0
-#define OMAP16XX_MCBSP_COUNT           0
-#endif
 
 static void omap_mcbsp_register_board_cfg(struct resource *res, int res_count,
                        struct omap_mcbsp_platform_data *config, int size)
index d9165709c532388316d2a351a2dc9c9f02b6da95..0d1f092821ff8ff7a8c0a57c759db31879b2c9b0 100644 (file)
 #define OMAP7XX_IDLECT3                0xfffece24
 #define OMAP7XX_IDLE_LOOP_REQUEST      0x0C00
 
-#if     !defined(CONFIG_ARCH_OMAP730) && \
-       !defined(CONFIG_ARCH_OMAP850) && \
-       !defined(CONFIG_ARCH_OMAP15XX) && \
-       !defined(CONFIG_ARCH_OMAP16XX)
-#warning "Power management for this processor not implemented yet"
-#endif
-
 #ifndef __ASSEMBLER__
 
 #include <linux/clk.h>
index b90d98bae68d74b12eac0eba44f9fc49c1df6871..03e25af6f48c97ff4db9b06063836de3b82ce206 100644 (file)
@@ -45,6 +45,8 @@ config MACH_PXA27X_DT
 config MACH_PXA3XX_DT
        bool "Support PXA3xx platforms from device tree"
        select CPU_PXA300
+       select CPU_PXA310
+       select CPU_PXA320
        select PINCTRL
        select POWER_SUPPLY
        select PXA3xx
index 03934808b2ed0717cbc616dbcc49d962b75f0090..c5ccca26a40870f54d96ed33535beee25bfc153a 100644 (file)
@@ -184,8 +184,6 @@ config ARM64
        select HAVE_DEBUG_KMEMLEAK
        select HAVE_DMA_CONTIGUOUS
        select HAVE_DYNAMIC_FTRACE
-       select HAVE_DYNAMIC_FTRACE_WITH_ARGS \
-               if $(cc-option,-fpatchable-function-entry=2)
        select FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY \
                if DYNAMIC_FTRACE_WITH_ARGS
        select HAVE_EFFICIENT_UNALIGNED_ACCESS
@@ -972,6 +970,22 @@ config ARM64_ERRATUM_2457168
 
          If unsure, say Y.
 
+config ARM64_ERRATUM_2645198
+       bool "Cortex-A715: 2645198: Workaround possible [ESR|FAR]_ELx corruption"
+       default y
+       help
+         This option adds the workaround for ARM Cortex-A715 erratum 2645198.
+
+         If a Cortex-A715 cpu sees a page mapping permissions change from executable
+         to non-executable, it may corrupt the ESR_ELx and FAR_ELx registers on the
+         next instruction abort caused by permission fault.
+
+         Only user-space does executable to non-executable permission transition via
+         mprotect() system call. Workaround the problem by doing a break-before-make
+         TLB invalidation, for all changes to executable user space mappings.
+
+         If unsure, say Y.
+
 config CAVIUM_ERRATUM_22375
        bool "Cavium erratum 22375, 24313"
        default y
index e3486f60645a41726e6c9594986e363e02ded05c..a1f0c38ccaddad76baad2efd7e7865830f737c38 100644 (file)
 };
 
 &usb {
-       phys = <&usb2_phy1>;
-       phy-names = "usb2-phy1";
-};
-
-&usb2_phy0 {
-       status = "disabled";
+       phys = <&usb2_phy0>, <&usb2_phy1>;
+       phy-names = "usb2-phy0", "usb2-phy1";
 };
index 7308f7b6b22c94e72d7336ee6ccf0c41050b18d4..8bce6406913875d38b7d2759561ac8c7db786726 100644 (file)
@@ -98,7 +98,7 @@
 
                        uart1: serial@12100 {
                                compatible = "snps,dw-apb-uart";
-                               reg = <0x11000 0x100>;
+                               reg = <0x12100 0x100>;
                                reg-shift = <2>;
                                interrupts = <GIC_SPI 84 IRQ_TYPE_LEVEL_HIGH>;
                                reg-io-width = <1>;
index 87c90e93667f73c65035e649f70f448f16f7d3ca..79de9cc395c4cfe3527943c5117b3b9861ca8735 100644 (file)
@@ -3,6 +3,7 @@
  * Copyright (c) 2015, LGE Inc. All rights reserved.
  * Copyright (c) 2016, The Linux Foundation. All rights reserved.
  * Copyright (c) 2021, Petr Vorel <petr.vorel@gmail.com>
+ * Copyright (c) 2022, Dominik Kobinski <dominikkobinski314@gmail.com>
  */
 
 /dts-v1/;
                        reg = <0 0x03400000 0 0x1200000>;
                        no-map;
                };
+
+               removed_region: reserved@5000000 {
+                       reg = <0 0x05000000 0 0x2200000>;
+                       no-map;
+               };
        };
 };
 
index b242c272d2af14ee5b5607c8280de48cb06fa006..fcca1ba94da699d0917a6c9fae9fdf09193259b6 100644 (file)
 #include <dt-bindings/gpio/gpio.h>
 #include <dt-bindings/input/gpio-keys.h>
 
+/delete-node/ &adsp_mem;
+/delete-node/ &audio_mem;
+/delete-node/ &mpss_mem;
+/delete-node/ &peripheral_region;
+/delete-node/ &rmtfs_mem;
+
 / {
        model = "Xiaomi Mi 4C";
        compatible = "xiaomi,libra", "qcom,msm8992";
                #size-cells = <2>;
                ranges;
 
-               /* This is for getting crash logs using Android downstream kernels */
-               ramoops@dfc00000 {
-                       compatible = "ramoops";
-                       reg = <0x0 0xdfc00000 0x0 0x40000>;
-                       console-size = <0x10000>;
-                       record-size = <0x10000>;
-                       ftrace-size = <0x10000>;
-                       pmsg-size = <0x20000>;
+               memory_hole: hole@6400000 {
+                       reg = <0 0x06400000 0 0x600000>;
+                       no-map;
+               };
+
+               memory_hole2: hole2@6c00000 {
+                       reg = <0 0x06c00000 0 0x2400000>;
+                       no-map;
+               };
+
+               mpss_mem: mpss@9000000 {
+                       reg = <0 0x09000000 0 0x5a00000>;
+                       no-map;
+               };
+
+               tzapp: tzapp@ea00000 {
+                       reg = <0 0x0ea00000 0 0x1900000>;
+                       no-map;
+               };
+
+               mdm_rfsa_mem: mdm-rfsa@ca0b0000 {
+                       reg = <0 0xca0b0000 0 0x10000>;
+                       no-map;
+               };
+
+               rmtfs_mem: rmtfs@ca100000 {
+                       compatible = "qcom,rmtfs-mem";
+                       reg = <0 0xca100000 0 0x180000>;
+                       no-map;
+
+                       qcom,client-id = <1>;
                };
 
-               modem_region: modem_region@9000000 {
-                       reg = <0x0 0x9000000 0x0 0x5a00000>;
+               audio_mem: audio@cb400000 {
+                       reg = <0 0xcb000000 0 0x400000>;
+                       no-mem;
+               };
+
+               qseecom_mem: qseecom@cb400000 {
+                       reg = <0 0xcb400000 0 0x1c00000>;
+                       no-mem;
+               };
+
+               adsp_rfsa_mem: adsp-rfsa@cd000000 {
+                       reg = <0 0xcd000000 0 0x10000>;
                        no-map;
                };
 
-               tzapp: modem_region@ea00000 {
-                       reg = <0x0 0xea00000 0x0 0x1900000>;
+               sensor_rfsa_mem: sensor-rfsa@cd010000 {
+                       reg = <0 0xcd010000 0 0x10000>;
                        no-map;
                };
+
+               ramoops@dfc00000 {
+                       compatible = "ramoops";
+                       reg = <0 0xdfc00000 0 0x40000>;
+                       console-size = <0x10000>;
+                       record-size = <0x10000>;
+                       ftrace-size = <0x10000>;
+                       pmsg-size = <0x20000>;
+               };
        };
 };
 
        status = "okay";
 };
 
-&peripheral_region {
-       reg = <0x0 0x7400000 0x0 0x1c00000>;
-       no-map;
-};
-
 &pm8994_spmi_regulators {
        VDD_APC0: s8 {
                regulator-min-microvolt = <680000>;
index 10adb4986ef1b97d79e12bbd0a2925dd6ae96ed2..02fc3795dbfd73a5c2905f630a88e42b520e7a2e 100644 (file)
        compatible = "qcom,rpmcc-msm8992", "qcom,rpmcc";
 };
 
-&tcsr_mutex {
-       compatible = "qcom,sfpb-mutex";
-};
-
 &timer {
        interrupts = <GIC_PPI 2 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
                             <GIC_PPI 3 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
index 85abff0e9b3f7c6913f26cbbf42d53d3725b7b85..7b0f62144c3eeafa473c12a61fac8293fa7fe793 100644 (file)
@@ -9,9 +9,6 @@
 
 #include "msm8994.dtsi"
 
-/* Angler's firmware does not report where the memory is allocated */
-/delete-node/ &cont_splash_mem;
-
 / {
        model = "Huawei Nexus 6P";
        compatible = "huawei,angler", "qcom,msm8994";
        chosen {
                stdout-path = "serial0:115200n8";
        };
+
+       reserved-memory {
+               #address-cells = <2>;
+               #size-cells = <2>;
+               ranges;
+
+               tzapp_mem: tzapp@4800000 {
+                       reg = <0 0x04800000 0 0x1900000>;
+                       no-map;
+               };
+
+               removed_region: reserved@6300000 {
+                       reg = <0 0x06300000 0 0xD00000>;
+                       no-map;
+               };
+       };
 };
 
 &blsp1_uart2 {
index 109c9d2b684d115235c895093b7154386bf3e898..71cf81a8eb4da67e0724a5ea36a6f69c93265f42 100644 (file)
@@ -10,6 +10,7 @@
 #include <dt-bindings/interconnect/qcom,sc8280xp.h>
 #include <dt-bindings/interrupt-controller/arm-gic.h>
 #include <dt-bindings/mailbox/qcom-ipcc.h>
+#include <dt-bindings/phy/phy-qcom-qmp.h>
 #include <dt-bindings/power/qcom-rpmpd.h>
 #include <dt-bindings/soc/qcom,rpmh-rsc.h>
 #include <dt-bindings/thermal/thermal.h>
                                 <0>,
                                 <0>,
                                 <0>,
-                                <&usb_0_ssphy>,
+                                <&usb_0_qmpphy QMP_USB43DP_USB3_PIPE_CLK>,
                                 <0>,
                                 <0>,
                                 <0>,
                                 <0>,
                                 <0>,
                                 <0>,
-                                <&usb_1_ssphy>,
+                                <&usb_1_qmpphy QMP_USB43DP_USB3_PIPE_CLK>,
                                 <0>,
                                 <0>,
                                 <0>,
                        };
                };
 
-               usb_0_qmpphy: phy-wrapper@88ec000 {
+               usb_0_qmpphy: phy@88eb000 {
                        compatible = "qcom,sc8280xp-qmp-usb43dp-phy";
-                       reg = <0 0x088ec000 0 0x1e4>,
-                             <0 0x088eb000 0 0x40>,
-                             <0 0x088ed000 0 0x1c8>;
-                       #address-cells = <2>;
-                       #size-cells = <2>;
-                       ranges;
+                       reg = <0 0x088eb000 0 0x4000>;
 
                        clocks = <&gcc GCC_USB3_PRIM_PHY_AUX_CLK>,
-                                <&rpmhcc RPMH_CXO_CLK>,
                                 <&gcc GCC_USB4_EUD_CLKREF_CLK>,
-                                <&gcc GCC_USB3_PRIM_PHY_COM_AUX_CLK>;
-                       clock-names = "aux", "ref_clk_src", "ref", "com_aux";
+                                <&gcc GCC_USB3_PRIM_PHY_COM_AUX_CLK>,
+                                <&gcc GCC_USB3_PRIM_PHY_PIPE_CLK>;
+                       clock-names = "aux", "ref", "com_aux", "usb3_pipe";
+
+                       power-domains = <&gcc USB30_PRIM_GDSC>;
 
                        resets = <&gcc GCC_USB3_PHY_PRIM_BCR>,
-                                <&gcc GCC_USB3_DP_PHY_PRIM_BCR>;
+                                <&gcc GCC_USB4_DP_PHY_PRIM_BCR>;
                        reset-names = "phy", "common";
 
-                       power-domains = <&gcc USB30_PRIM_GDSC>;
+                       #clock-cells = <1>;
+                       #phy-cells = <1>;
 
                        status = "disabled";
-
-                       usb_0_ssphy: usb3-phy@88eb400 {
-                               reg = <0 0x088eb400 0 0x100>,
-                                     <0 0x088eb600 0 0x3ec>,
-                                     <0 0x088ec400 0 0x364>,
-                                     <0 0x088eba00 0 0x100>,
-                                     <0 0x088ebc00 0 0x3ec>,
-                                     <0 0x088ec200 0 0x18>;
-                               #phy-cells = <0>;
-                               #clock-cells = <0>;
-                               clocks = <&gcc GCC_USB3_PRIM_PHY_PIPE_CLK>;
-                               clock-names = "pipe0";
-                               clock-output-names = "usb0_phy_pipe_clk_src";
-                       };
                };
 
                usb_1_hsphy: phy@8902000 {
                        status = "disabled";
                };
 
-               usb_1_qmpphy: phy-wrapper@8904000 {
+               usb_1_qmpphy: phy@8903000 {
                        compatible = "qcom,sc8280xp-qmp-usb43dp-phy";
-                       reg = <0 0x08904000 0 0x1e4>,
-                             <0 0x08903000 0 0x40>,
-                             <0 0x08905000 0 0x1c8>;
-                       #address-cells = <2>;
-                       #size-cells = <2>;
-                       ranges;
+                       reg = <0 0x08903000 0 0x4000>;
 
                        clocks = <&gcc GCC_USB3_SEC_PHY_AUX_CLK>,
-                                <&rpmhcc RPMH_CXO_CLK>,
                                 <&gcc GCC_USB4_CLKREF_CLK>,
-                                <&gcc GCC_USB3_SEC_PHY_COM_AUX_CLK>;
-                       clock-names = "aux", "ref_clk_src", "ref", "com_aux";
+                                <&gcc GCC_USB3_SEC_PHY_COM_AUX_CLK>,
+                                <&gcc GCC_USB3_SEC_PHY_PIPE_CLK>;
+                       clock-names = "aux", "ref", "com_aux", "usb3_pipe";
+
+                       power-domains = <&gcc USB30_SEC_GDSC>;
 
                        resets = <&gcc GCC_USB3_PHY_SEC_BCR>,
                                 <&gcc GCC_USB4_1_DP_PHY_PRIM_BCR>;
                        reset-names = "phy", "common";
 
-                       power-domains = <&gcc USB30_SEC_GDSC>;
+                       #clock-cells = <1>;
+                       #phy-cells = <1>;
 
                        status = "disabled";
-
-                       usb_1_ssphy: usb3-phy@8903400 {
-                               reg = <0 0x08903400 0 0x100>,
-                                     <0 0x08903600 0 0x3ec>,
-                                     <0 0x08904400 0 0x364>,
-                                     <0 0x08903a00 0 0x100>,
-                                     <0 0x08903c00 0 0x3ec>,
-                                     <0 0x08904200 0 0x18>;
-                               #phy-cells = <0>;
-                               #clock-cells = <0>;
-                               clocks = <&gcc GCC_USB3_SEC_PHY_PIPE_CLK>;
-                               clock-names = "pipe0";
-                               clock-output-names = "usb1_phy_pipe_clk_src";
-                       };
                };
 
                pmu@9091000 {
                                reg = <0 0x0a600000 0 0xcd00>;
                                interrupts = <GIC_SPI 803 IRQ_TYPE_LEVEL_HIGH>;
                                iommus = <&apps_smmu 0x820 0x0>;
-                               phys = <&usb_0_hsphy>, <&usb_0_ssphy>;
+                               phys = <&usb_0_hsphy>, <&usb_0_qmpphy QMP_USB43DP_USB3_PHY>;
                                phy-names = "usb2-phy", "usb3-phy";
                        };
                };
                                reg = <0 0x0a800000 0 0xcd00>;
                                interrupts = <GIC_SPI 810 IRQ_TYPE_LEVEL_HIGH>;
                                iommus = <&apps_smmu 0x860 0x0>;
-                               phys = <&usb_1_hsphy>, <&usb_1_ssphy>;
+                               phys = <&usb_1_hsphy>, <&usb_1_qmpphy QMP_USB43DP_USB3_PHY>;
                                phy-names = "usb2-phy", "usb3-phy";
                        };
                };
index dab5579946f351f1a43314f121fc5682556f29ff..927032863e2f15e9d63b57a3b66956d6243ea9ed 100644 (file)
                                exit-latency-us = <6562>;
                                min-residency-us = <9987>;
                                local-timer-stop;
-                               status = "disabled";
                        };
                };
        };
index 245dce24ec599c0d8b062f9953a234a792508616..fb3cd20a82b5eea676ccadcfa108031a747fd957 100644 (file)
                                 <&rpmhcc RPMH_CXO_CLK>;
                        clock-names = "iface", "core", "xo";
                        resets = <&gcc GCC_SDCC2_BCR>;
-                       interconnects = <&aggre2_noc MASTER_SDCC_2 0 &mc_virt SLAVE_EBI1 0>,
-                                       <&gem_noc MASTER_APPSS_PROC 0 &config_noc SLAVE_SDCC_2 0>;
+                       interconnects = <&aggre2_noc MASTER_SDCC_2 &mc_virt SLAVE_EBI1>,
+                                       <&gem_noc MASTER_APPSS_PROC &config_noc SLAVE_SDCC_2>;
                        interconnect-names = "sdhc-ddr","cpu-sdhc";
                        iommus = <&apps_smmu 0x4a0 0x0>;
                        power-domains = <&rpmhpd SM8350_CX>;
index 028207c4afd0f76037336cf030375dd9b3ab4011..fa85856f33ceeed098f5bba2963d301e9280d544 100644 (file)
@@ -8,6 +8,7 @@
  */
 
 #include <linux/linkage.h>
+#include <linux/cfi_types.h>
 #include <asm/assembler.h>
 #include "sm4-ce-asm.h"
 
@@ -104,7 +105,7 @@ SYM_FUNC_START(sm4_ce_ccm_final)
 SYM_FUNC_END(sm4_ce_ccm_final)
 
 .align 3
-SYM_FUNC_START(sm4_ce_ccm_enc)
+SYM_TYPED_FUNC_START(sm4_ce_ccm_enc)
        /* input:
         *   x0: round key array, CTX
         *   x1: dst
@@ -216,7 +217,7 @@ SYM_FUNC_START(sm4_ce_ccm_enc)
 SYM_FUNC_END(sm4_ce_ccm_enc)
 
 .align 3
-SYM_FUNC_START(sm4_ce_ccm_dec)
+SYM_TYPED_FUNC_START(sm4_ce_ccm_dec)
        /* input:
         *   x0: round key array, CTX
         *   x1: dst
index 7aa3ec18a28912bc13d74c6b187b55a5b2d2cf2e..347f25d7572793a1b9d1f853e62ecc553521167b 100644 (file)
@@ -9,6 +9,7 @@
  */
 
 #include <linux/linkage.h>
+#include <linux/cfi_types.h>
 #include <asm/assembler.h>
 #include "sm4-ce-asm.h"
 
@@ -370,7 +371,7 @@ SYM_FUNC_START(pmull_ghash_update)
 SYM_FUNC_END(pmull_ghash_update)
 
 .align 3
-SYM_FUNC_START(sm4_ce_pmull_gcm_enc)
+SYM_TYPED_FUNC_START(sm4_ce_pmull_gcm_enc)
        /* input:
         *   x0: round key array, CTX
         *   x1: dst
@@ -581,7 +582,7 @@ SYM_FUNC_END(sm4_ce_pmull_gcm_enc)
 #define        RH3     v20
 
 .align 3
-SYM_FUNC_START(sm4_ce_pmull_gcm_dec)
+SYM_TYPED_FUNC_START(sm4_ce_pmull_gcm_dec)
        /* input:
         *   x0: round key array, CTX
         *   x1: dst
index 0890e4f568fb7f9804566fbf82b2b79bea0732df..cbb3d961123b17aba398f29c2a346ac2e7f5bf47 100644 (file)
@@ -315,7 +315,7 @@ __ll_sc__cmpxchg_double##name(unsigned long old1,                   \
        "       cbnz    %w0, 1b\n"                                      \
        "       " #mb "\n"                                              \
        "2:"                                                            \
-       : "=&r" (tmp), "=&r" (ret), "+Q" (*(unsigned long *)ptr)        \
+       : "=&r" (tmp), "=&r" (ret), "+Q" (*(__uint128_t *)ptr)          \
        : "r" (old1), "r" (old2), "r" (new1), "r" (new2)                \
        : cl);                                                          \
                                                                        \
index 52075e93de6c01a0b7034e0a14887e43a9167137..a94d6dacc0292e3e92b644636c923338556b3222 100644 (file)
@@ -311,7 +311,7 @@ __lse__cmpxchg_double##name(unsigned long old1,                             \
        "       eor     %[old2], %[old2], %[oldval2]\n"                 \
        "       orr     %[old1], %[old1], %[old2]"                      \
        : [old1] "+&r" (x0), [old2] "+&r" (x1),                         \
-         [v] "+Q" (*(unsigned long *)ptr)                              \
+         [v] "+Q" (*(__uint128_t *)ptr)                                \
        : [new1] "r" (x2), [new2] "r" (x3), [ptr] "r" (x4),             \
          [oldval1] "r" (oldval1), [oldval2] "r" (oldval2)              \
        : cl);                                                          \
index 4e8b66c74ea2b20b1ee1e22623ed76ac05e6750e..683ca3af408485aa6cdf4aa44b5988d91cd6a0f3 100644 (file)
 #define APPLE_CPU_PART_M1_FIRESTORM_PRO        0x025
 #define APPLE_CPU_PART_M1_ICESTORM_MAX 0x028
 #define APPLE_CPU_PART_M1_FIRESTORM_MAX        0x029
+#define APPLE_CPU_PART_M2_BLIZZARD     0x032
+#define APPLE_CPU_PART_M2_AVALANCHE    0x033
 
 #define AMPERE_CPU_PART_AMPERE1                0xAC3
 
 #define MIDR_APPLE_M1_FIRESTORM_PRO MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_FIRESTORM_PRO)
 #define MIDR_APPLE_M1_ICESTORM_MAX MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_ICESTORM_MAX)
 #define MIDR_APPLE_M1_FIRESTORM_MAX MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_FIRESTORM_MAX)
+#define MIDR_APPLE_M2_BLIZZARD MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M2_BLIZZARD)
+#define MIDR_APPLE_M2_AVALANCHE MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M2_AVALANCHE)
 #define MIDR_AMPERE1 MIDR_CPU_MODEL(ARM_CPU_IMP_AMPERE, AMPERE_CPU_PART_AMPERE1)
 
 /* Fujitsu Erratum 010001 affects A64FX 1.0 and 1.1, (v0r0 and v1r0) */
index 15b34fbfca66735fe657d746149105d0b8d5a32d..206de10524e338c9406d57641170f374527dd178 100644 (file)
 #define ESR_ELx_FSC_ACCESS     (0x08)
 #define ESR_ELx_FSC_FAULT      (0x04)
 #define ESR_ELx_FSC_PERM       (0x0C)
+#define ESR_ELx_FSC_SEA_TTW0   (0x14)
+#define ESR_ELx_FSC_SEA_TTW1   (0x15)
+#define ESR_ELx_FSC_SEA_TTW2   (0x16)
+#define ESR_ELx_FSC_SEA_TTW3   (0x17)
+#define ESR_ELx_FSC_SECC       (0x18)
+#define ESR_ELx_FSC_SECC_TTW0  (0x1c)
+#define ESR_ELx_FSC_SECC_TTW1  (0x1d)
+#define ESR_ELx_FSC_SECC_TTW2  (0x1e)
+#define ESR_ELx_FSC_SECC_TTW3  (0x1f)
 
 /* ISS field definitions for Data Aborts */
 #define ESR_ELx_ISV_SHIFT      (24)
index d20f5da2d76fa3ef876e99b14c28705da0d0ace1..6a4a1ab8eb238f148177a30e2c32f7f912e8bd31 100644 (file)
@@ -49,6 +49,15 @@ extern pte_t huge_ptep_get(pte_t *ptep);
 
 void __init arm64_hugetlb_cma_reserve(void);
 
+#define huge_ptep_modify_prot_start huge_ptep_modify_prot_start
+extern pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
+                                        unsigned long addr, pte_t *ptep);
+
+#define huge_ptep_modify_prot_commit huge_ptep_modify_prot_commit
+extern void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
+                                        unsigned long addr, pte_t *ptep,
+                                        pte_t old_pte, pte_t new_pte);
+
 #include <asm-generic/hugetlb.h>
 
 #endif /* __ASM_HUGETLB_H */
index 0df3fc3a017371cf402efc50c32fe326a4e14711..26b0c97df98636274163ab0bb6a5ea5e5c6f38bc 100644 (file)
                                 BIT(18) |              \
                                 GENMASK(16, 15))
 
-/* For compatibility with fault code shared with 32-bit */
-#define FSC_FAULT      ESR_ELx_FSC_FAULT
-#define FSC_ACCESS     ESR_ELx_FSC_ACCESS
-#define FSC_PERM       ESR_ELx_FSC_PERM
-#define FSC_SEA                ESR_ELx_FSC_EXTABT
-#define FSC_SEA_TTW0   (0x14)
-#define FSC_SEA_TTW1   (0x15)
-#define FSC_SEA_TTW2   (0x16)
-#define FSC_SEA_TTW3   (0x17)
-#define FSC_SECC       (0x18)
-#define FSC_SECC_TTW0  (0x1c)
-#define FSC_SECC_TTW1  (0x1d)
-#define FSC_SECC_TTW2  (0x1e)
-#define FSC_SECC_TTW3  (0x1f)
-
 /* Hyp Prefetch Fault Address Register (HPFAR/HDFAR) */
 #define HPFAR_MASK     (~UL(0xf))
 /*
index 9bdba47f7e149f90055657aa251155708a806ad6..193583df2d9c45761ded5a28a750e7d539d96945 100644 (file)
@@ -349,16 +349,16 @@ static __always_inline u8 kvm_vcpu_trap_get_fault_level(const struct kvm_vcpu *v
 static __always_inline bool kvm_vcpu_abt_issea(const struct kvm_vcpu *vcpu)
 {
        switch (kvm_vcpu_trap_get_fault(vcpu)) {
-       case FSC_SEA:
-       case FSC_SEA_TTW0:
-       case FSC_SEA_TTW1:
-       case FSC_SEA_TTW2:
-       case FSC_SEA_TTW3:
-       case FSC_SECC:
-       case FSC_SECC_TTW0:
-       case FSC_SECC_TTW1:
-       case FSC_SECC_TTW2:
-       case FSC_SECC_TTW3:
+       case ESR_ELx_FSC_EXTABT:
+       case ESR_ELx_FSC_SEA_TTW0:
+       case ESR_ELx_FSC_SEA_TTW1:
+       case ESR_ELx_FSC_SEA_TTW2:
+       case ESR_ELx_FSC_SEA_TTW3:
+       case ESR_ELx_FSC_SECC:
+       case ESR_ELx_FSC_SECC_TTW0:
+       case ESR_ELx_FSC_SECC_TTW1:
+       case ESR_ELx_FSC_SECC_TTW2:
+       case ESR_ELx_FSC_SECC_TTW3:
                return true;
        default:
                return false;
@@ -373,8 +373,26 @@ static __always_inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
 
 static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
 {
-       if (kvm_vcpu_abt_iss1tw(vcpu))
-               return true;
+       if (kvm_vcpu_abt_iss1tw(vcpu)) {
+               /*
+                * Only a permission fault on a S1PTW should be
+                * considered as a write. Otherwise, page tables baked
+                * in a read-only memslot will result in an exception
+                * being delivered in the guest.
+                *
+                * The drawback is that we end-up faulting twice if the
+                * guest is using any of HW AF/DB: a translation fault
+                * to map the page containing the PT (read only at
+                * first), then a permission fault to allow the flags
+                * to be set.
+                */
+               switch (kvm_vcpu_trap_get_fault_type(vcpu)) {
+               case ESR_ELx_FSC_PERM:
+                       return true;
+               default:
+                       return false;
+               }
+       }
 
        if (kvm_vcpu_trap_is_iabt(vcpu))
                return false;
index b4bbeed80fb6ed1d37334a35b6ed49ce2bc0dcbc..65e78999c75d76ea8e1443baec542d5f80c871d9 100644 (file)
@@ -681,7 +681,7 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd)
 #define pud_leaf(pud)          (pud_present(pud) && !pud_table(pud))
 #define pud_valid(pud)         pte_valid(pud_pte(pud))
 #define pud_user(pud)          pte_user(pud_pte(pud))
-
+#define pud_user_exec(pud)     pte_user_exec(pud_pte(pud))
 
 static inline void set_pud(pud_t *pudp, pud_t pud)
 {
@@ -730,6 +730,7 @@ static inline pmd_t *pud_pgtable(pud_t pud)
 #else
 
 #define pud_page_paddr(pud)    ({ BUILD_BUG(); 0; })
+#define pud_user_exec(pud)     pud_user(pud) /* Always 0 with folding */
 
 /* Match pmd_offset folding in <asm/generic/pgtable-nopmd.h> */
 #define pmd_set_fixmap(addr)           NULL
@@ -862,12 +863,12 @@ static inline bool pte_user_accessible_page(pte_t pte)
 
 static inline bool pmd_user_accessible_page(pmd_t pmd)
 {
-       return pmd_leaf(pmd) && (pmd_user(pmd) || pmd_user_exec(pmd));
+       return pmd_leaf(pmd) && !pmd_present_invalid(pmd) && (pmd_user(pmd) || pmd_user_exec(pmd));
 }
 
 static inline bool pud_user_accessible_page(pud_t pud)
 {
-       return pud_leaf(pud) && pud_user(pud);
+       return pud_leaf(pud) && (pud_user(pud) || pud_user_exec(pud));
 }
 #endif
 
@@ -1093,6 +1094,15 @@ static inline bool pud_sect_supported(void)
 }
 
 
+#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
+#define ptep_modify_prot_start ptep_modify_prot_start
+extern pte_t ptep_modify_prot_start(struct vm_area_struct *vma,
+                                   unsigned long addr, pte_t *ptep);
+
+#define ptep_modify_prot_commit ptep_modify_prot_commit
+extern void ptep_modify_prot_commit(struct vm_area_struct *vma,
+                                   unsigned long addr, pte_t *ptep,
+                                   pte_t old_pte, pte_t new_pte);
 #endif /* !__ASSEMBLY__ */
 
 #endif /* __ASM_PGTABLE_H */
index ba4bff5ca674943100a51fcfc3810d295514a9d4..2b09495499c6183a155e81b3fc33d13cb6e62012 100644 (file)
@@ -16,7 +16,7 @@
 #define UPROBE_SWBP_INSN_SIZE  AARCH64_INSN_SIZE
 #define UPROBE_XOL_SLOT_BYTES  MAX_UINSN_BYTES
 
-typedef u32 uprobe_opcode_t;
+typedef __le32 uprobe_opcode_t;
 
 struct arch_uprobe_task {
 };
index 89ac00084f38a4584924bd2da8e2e0e9c5341a07..307faa2b4395ed9fcacdbf9f992111cd376dc601 100644 (file)
@@ -661,6 +661,13 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
                CAP_MIDR_RANGE_LIST(trbe_write_out_of_range_cpus),
        },
 #endif
+#ifdef CONFIG_ARM64_ERRATUM_2645198
+       {
+               .desc = "ARM erratum 2645198",
+               .capability = ARM64_WORKAROUND_2645198,
+               ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A715)
+       },
+#endif
 #ifdef CONFIG_ARM64_ERRATUM_2077057
        {
                .desc = "ARM erratum 2077057",
index a00886410537d6a615c7f415a884631b1f0e4a1d..d872d18101d837508d1a4dd2cef742cc7c83e241 100644 (file)
@@ -4,6 +4,7 @@
  */
 
 #include <linux/linkage.h>
+#include <asm/assembler.h>
 
 SYM_FUNC_START(__efi_rt_asm_wrapper)
        stp     x29, x30, [sp, #-112]!
index 353009d7f307f40e982adf627a3ad9e408c80a50..2e94d20c4ac7a780095703b69b3da0fccce37537 100644 (file)
@@ -8,28 +8,27 @@
 #include <asm/cpufeature.h>
 #include <asm/mte.h>
 
-#define for_each_mte_vma(vmi, vma)                                     \
+#define for_each_mte_vma(cprm, i, m)                                   \
        if (system_supports_mte())                                      \
-               for_each_vma(vmi, vma)                                  \
-                       if (vma->vm_flags & VM_MTE)
+               for (i = 0, m = cprm->vma_meta;                         \
+                    i < cprm->vma_count;                               \
+                    i++, m = cprm->vma_meta + i)                       \
+                       if (m->flags & VM_MTE)
 
-static unsigned long mte_vma_tag_dump_size(struct vm_area_struct *vma)
+static unsigned long mte_vma_tag_dump_size(struct core_vma_metadata *m)
 {
-       if (vma->vm_flags & VM_DONTDUMP)
-               return 0;
-
-       return vma_pages(vma) * MTE_PAGE_TAG_STORAGE;
+       return (m->dump_size >> PAGE_SHIFT) * MTE_PAGE_TAG_STORAGE;
 }
 
 /* Derived from dump_user_range(); start/end must be page-aligned */
 static int mte_dump_tag_range(struct coredump_params *cprm,
-                             unsigned long start, unsigned long end)
+                             unsigned long start, unsigned long len)
 {
        int ret = 1;
        unsigned long addr;
        void *tags = NULL;
 
-       for (addr = start; addr < end; addr += PAGE_SIZE) {
+       for (addr = start; addr < start + len; addr += PAGE_SIZE) {
                struct page *page = get_dump_page(addr);
 
                /*
@@ -65,7 +64,6 @@ static int mte_dump_tag_range(struct coredump_params *cprm,
                mte_save_page_tags(page_address(page), tags);
                put_page(page);
                if (!dump_emit(cprm, tags, MTE_PAGE_TAG_STORAGE)) {
-                       mte_free_tag_storage(tags);
                        ret = 0;
                        break;
                }
@@ -77,13 +75,13 @@ static int mte_dump_tag_range(struct coredump_params *cprm,
        return ret;
 }
 
-Elf_Half elf_core_extra_phdrs(void)
+Elf_Half elf_core_extra_phdrs(struct coredump_params *cprm)
 {
-       struct vm_area_struct *vma;
+       int i;
+       struct core_vma_metadata *m;
        int vma_count = 0;
-       VMA_ITERATOR(vmi, current->mm, 0);
 
-       for_each_mte_vma(vmi, vma)
+       for_each_mte_vma(cprm, i, m)
                vma_count++;
 
        return vma_count;
@@ -91,18 +89,18 @@ Elf_Half elf_core_extra_phdrs(void)
 
 int elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset)
 {
-       struct vm_area_struct *vma;
-       VMA_ITERATOR(vmi, current->mm, 0);
+       int i;
+       struct core_vma_metadata *m;
 
-       for_each_mte_vma(vmi, vma) {
+       for_each_mte_vma(cprm, i, m) {
                struct elf_phdr phdr;
 
                phdr.p_type = PT_AARCH64_MEMTAG_MTE;
                phdr.p_offset = offset;
-               phdr.p_vaddr = vma->vm_start;
+               phdr.p_vaddr = m->start;
                phdr.p_paddr = 0;
-               phdr.p_filesz = mte_vma_tag_dump_size(vma);
-               phdr.p_memsz = vma->vm_end - vma->vm_start;
+               phdr.p_filesz = mte_vma_tag_dump_size(m);
+               phdr.p_memsz = m->end - m->start;
                offset += phdr.p_filesz;
                phdr.p_flags = 0;
                phdr.p_align = 0;
@@ -114,28 +112,25 @@ int elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset)
        return 1;
 }
 
-size_t elf_core_extra_data_size(void)
+size_t elf_core_extra_data_size(struct coredump_params *cprm)
 {
-       struct vm_area_struct *vma;
+       int i;
+       struct core_vma_metadata *m;
        size_t data_size = 0;
-       VMA_ITERATOR(vmi, current->mm, 0);
 
-       for_each_mte_vma(vmi, vma)
-               data_size += mte_vma_tag_dump_size(vma);
+       for_each_mte_vma(cprm, i, m)
+               data_size += mte_vma_tag_dump_size(m);
 
        return data_size;
 }
 
 int elf_core_write_extra_data(struct coredump_params *cprm)
 {
-       struct vm_area_struct *vma;
-       VMA_ITERATOR(vmi, current->mm, 0);
-
-       for_each_mte_vma(vmi, vma) {
-               if (vma->vm_flags & VM_DONTDUMP)
-                       continue;
+       int i;
+       struct core_vma_metadata *m;
 
-               if (!mte_dump_tag_range(cprm, vma->vm_start, vma->vm_end))
+       for_each_mte_vma(cprm, i, m) {
+               if (!mte_dump_tag_range(cprm, m->start, m->dump_size))
                        return 0;
        }
 
index dcc81e7200d4011c99a448f90853263d321a7e65..b6ef1af0122ebde4984b504e8a14578fe6996ffb 100644 (file)
@@ -385,7 +385,7 @@ static void task_fpsimd_load(void)
        WARN_ON(!system_supports_fpsimd());
        WARN_ON(!have_cpu_fpsimd_context());
 
-       if (system_supports_sve()) {
+       if (system_supports_sve() || system_supports_sme()) {
                switch (current->thread.fp_type) {
                case FP_STATE_FPSIMD:
                        /* Stop tracking SVE for this task until next use. */
index 2686ab15760173d1dc11805f579daf4d7e563ba5..0c321ad23cd3a48d60a52f3a0a1209c4b7218ac2 100644 (file)
@@ -1357,7 +1357,7 @@ enum aarch64_regset {
 #ifdef CONFIG_ARM64_SVE
        REGSET_SVE,
 #endif
-#ifdef CONFIG_ARM64_SVE
+#ifdef CONFIG_ARM64_SME
        REGSET_SSVE,
        REGSET_ZA,
 #endif
index e0d09bf5b01b80a8ed637bc1f0dd51168f0a55a2..be279fd482480de2a3b2dfff80a37300907555d8 100644 (file)
@@ -281,7 +281,12 @@ static int restore_sve_fpsimd_context(struct user_ctxs *user)
 
                vl = task_get_sme_vl(current);
        } else {
-               if (!system_supports_sve())
+               /*
+                * A SME only system use SVE for streaming mode so can
+                * have a SVE formatted context with a zero VL and no
+                * payload data.
+                */
+               if (!system_supports_sve() && !system_supports_sme())
                        return -EINVAL;
 
                vl = task_get_sve_vl(current);
@@ -732,7 +737,7 @@ static int setup_sigframe_layout(struct rt_sigframe_user_layout *user,
                        return err;
        }
 
-       if (system_supports_sve()) {
+       if (system_supports_sve() || system_supports_sme()) {
                unsigned int vq = 0;
 
                if (add_all || test_thread_flag(TIF_SVE) ||
index 1b8a2dcd712f321c94b3304012770a62ef6bc61a..9ddcfe2c3e574fc8d85b01d91f244f2091350247 100644 (file)
@@ -60,7 +60,7 @@ static inline bool __get_fault_info(u64 esr, struct kvm_vcpu_fault_info *fault)
         */
        if (!(esr & ESR_ELx_S1PTW) &&
            (cpus_have_final_cap(ARM64_WORKAROUND_834220) ||
-            (esr & ESR_ELx_FSC_TYPE) == FSC_PERM)) {
+            (esr & ESR_ELx_FSC_TYPE) == ESR_ELx_FSC_PERM)) {
                if (!__translate_far_to_hpfar(far, &hpfar))
                        return false;
        } else {
index 3330d1b76bdd2015592d35e9a6ef55502050e885..07d37ff88a3f2a69352ae123a08cf37ebfcb273e 100644 (file)
@@ -367,7 +367,7 @@ static bool kvm_hyp_handle_dabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
        if (static_branch_unlikely(&vgic_v2_cpuif_trap)) {
                bool valid;
 
-               valid = kvm_vcpu_trap_get_fault_type(vcpu) == FSC_FAULT &&
+               valid = kvm_vcpu_trap_get_fault_type(vcpu) == ESR_ELx_FSC_FAULT &&
                        kvm_vcpu_dabt_isvalid(vcpu) &&
                        !kvm_vcpu_abt_issea(vcpu) &&
                        !kvm_vcpu_abt_iss1tw(vcpu);
index 31d7fa4c7c140513f6b69034cafd5f23a8a88567..a3ee3b605c9b808ccd091ef8651401006ea96d26 100644 (file)
@@ -1212,7 +1212,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
        exec_fault = kvm_vcpu_trap_is_exec_fault(vcpu);
        VM_BUG_ON(write_fault && exec_fault);
 
-       if (fault_status == FSC_PERM && !write_fault && !exec_fault) {
+       if (fault_status == ESR_ELx_FSC_PERM && !write_fault && !exec_fault) {
                kvm_err("Unexpected L2 read permission error\n");
                return -EFAULT;
        }
@@ -1277,7 +1277,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
         * only exception to this is when dirty logging is enabled at runtime
         * and a write fault needs to collapse a block entry into a table.
         */
-       if (fault_status != FSC_PERM || (logging_active && write_fault)) {
+       if (fault_status != ESR_ELx_FSC_PERM ||
+           (logging_active && write_fault)) {
                ret = kvm_mmu_topup_memory_cache(memcache,
                                                 kvm_mmu_cache_min_pages(kvm));
                if (ret)
@@ -1342,7 +1343,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
         * backed by a THP and thus use block mapping if possible.
         */
        if (vma_pagesize == PAGE_SIZE && !(force_pte || device)) {
-               if (fault_status == FSC_PERM && fault_granule > PAGE_SIZE)
+               if (fault_status ==  ESR_ELx_FSC_PERM &&
+                   fault_granule > PAGE_SIZE)
                        vma_pagesize = fault_granule;
                else
                        vma_pagesize = transparent_hugepage_adjust(kvm, memslot,
@@ -1350,7 +1352,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
                                                                   &fault_ipa);
        }
 
-       if (fault_status != FSC_PERM && !device && kvm_has_mte(kvm)) {
+       if (fault_status != ESR_ELx_FSC_PERM && !device && kvm_has_mte(kvm)) {
                /* Check the VMM hasn't introduced a new disallowed VMA */
                if (kvm_vma_mte_allowed(vma)) {
                        sanitise_mte_tags(kvm, pfn, vma_pagesize);
@@ -1376,7 +1378,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
         * permissions only if vma_pagesize equals fault_granule. Otherwise,
         * kvm_pgtable_stage2_map() should be called to change block size.
         */
-       if (fault_status == FSC_PERM && vma_pagesize == fault_granule)
+       if (fault_status == ESR_ELx_FSC_PERM && vma_pagesize == fault_granule)
                ret = kvm_pgtable_stage2_relax_perms(pgt, fault_ipa, prot);
        else
                ret = kvm_pgtable_stage2_map(pgt, fault_ipa, vma_pagesize,
@@ -1441,7 +1443,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
        fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);
        is_iabt = kvm_vcpu_trap_is_iabt(vcpu);
 
-       if (fault_status == FSC_FAULT) {
+       if (fault_status == ESR_ELx_FSC_FAULT) {
                /* Beyond sanitised PARange (which is the IPA limit) */
                if (fault_ipa >= BIT_ULL(get_kvm_ipa_limit())) {
                        kvm_inject_size_fault(vcpu);
@@ -1476,8 +1478,9 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
                              kvm_vcpu_get_hfar(vcpu), fault_ipa);
 
        /* Check the stage-2 fault is trans. fault or write fault */
-       if (fault_status != FSC_FAULT && fault_status != FSC_PERM &&
-           fault_status != FSC_ACCESS) {
+       if (fault_status != ESR_ELx_FSC_FAULT &&
+           fault_status != ESR_ELx_FSC_PERM &&
+           fault_status != ESR_ELx_FSC_ACCESS) {
                kvm_err("Unsupported FSC: EC=%#x xFSC=%#lx ESR_EL2=%#lx\n",
                        kvm_vcpu_trap_get_class(vcpu),
                        (unsigned long)kvm_vcpu_trap_get_fault(vcpu),
@@ -1539,7 +1542,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
        /* Userspace should not be able to register out-of-bounds IPAs */
        VM_BUG_ON(fault_ipa >= kvm_phys_size(vcpu->kvm));
 
-       if (fault_status == FSC_ACCESS) {
+       if (fault_status == ESR_ELx_FSC_ACCESS) {
                handle_access_fault(vcpu, fault_ipa);
                ret = 1;
                goto out_unlock;
index d5ee52d6bf7326f9254298631ef2fc0824c0e176..c6cbfe6b854b310f74608c87136b81b5691ef51f 100644 (file)
@@ -646,7 +646,7 @@ static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
                return;
 
        /* Only preserve PMCR_EL0.N, and reset the rest to 0 */
-       pmcr = read_sysreg(pmcr_el0) & ARMV8_PMU_PMCR_N_MASK;
+       pmcr = read_sysreg(pmcr_el0) & (ARMV8_PMU_PMCR_N_MASK << ARMV8_PMU_PMCR_N_SHIFT);
        if (!kvm_supports_32bit_el0())
                pmcr |= ARMV8_PMU_PMCR_LC;
 
index 826ff6f2a4e7b772fa4c51a3eb901399b1d03b0f..2074521d4a8ce57ba5e4d73abe03e8b43223d487 100644 (file)
@@ -616,6 +616,8 @@ static const struct midr_range broken_seis[] = {
        MIDR_ALL_VERSIONS(MIDR_APPLE_M1_FIRESTORM_PRO),
        MIDR_ALL_VERSIONS(MIDR_APPLE_M1_ICESTORM_MAX),
        MIDR_ALL_VERSIONS(MIDR_APPLE_M1_FIRESTORM_MAX),
+       MIDR_ALL_VERSIONS(MIDR_APPLE_M2_BLIZZARD),
+       MIDR_ALL_VERSIONS(MIDR_APPLE_M2_AVALANCHE),
        {},
 };
 
index 35e9a468d13e6ac68093c7516350815df5b009b5..95364e8bdc194491ce298197d1a52896af511c53 100644 (file)
@@ -559,3 +559,24 @@ bool __init arch_hugetlb_valid_size(unsigned long size)
 {
        return __hugetlb_valid_size(size);
 }
+
+pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
+{
+       if (IS_ENABLED(CONFIG_ARM64_ERRATUM_2645198) &&
+           cpus_have_const_cap(ARM64_WORKAROUND_2645198)) {
+               /*
+                * Break-before-make (BBM) is required for all user space mappings
+                * when the permission changes from executable to non-executable
+                * in cases where cpu is affected with errata #2645198.
+                */
+               if (pte_user_exec(READ_ONCE(*ptep)))
+                       return huge_ptep_clear_flush(vma, addr, ptep);
+       }
+       return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
+}
+
+void huge_ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep,
+                                 pte_t old_pte, pte_t pte)
+{
+       set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
+}
index 14c87e8d69d8391c995616a749c665cabf4aaddf..d77c9f56b7b435d43f906445ecaef3554530c693 100644 (file)
@@ -1630,3 +1630,24 @@ static int __init prevent_bootmem_remove_init(void)
 }
 early_initcall(prevent_bootmem_remove_init);
 #endif
+
+pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
+{
+       if (IS_ENABLED(CONFIG_ARM64_ERRATUM_2645198) &&
+           cpus_have_const_cap(ARM64_WORKAROUND_2645198)) {
+               /*
+                * Break-before-make (BBM) is required for all user space mappings
+                * when the permission changes from executable to non-executable
+                * in cases where cpu is affected with errata #2645198.
+                */
+               if (pte_user_exec(READ_ONCE(*ptep)))
+                       return ptep_clear_flush(vma, addr, ptep);
+       }
+       return ptep_get_and_clear(vma->vm_mm, addr, ptep);
+}
+
+void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep,
+                            pte_t old_pte, pte_t pte)
+{
+       set_pte_at(vma->vm_mm, addr, ptep, pte);
+}
index a86ee376920a08ddfdb7f116336a90ed13e88629..dfeb2c51e2573dc07d58be19e58cb89d568051ab 100644 (file)
@@ -71,6 +71,7 @@ WORKAROUND_2038923
 WORKAROUND_2064142
 WORKAROUND_2077057
 WORKAROUND_2457168
+WORKAROUND_2645198
 WORKAROUND_2658417
 WORKAROUND_TRBE_OVERWRITE_FILL_MODE
 WORKAROUND_TSB_FLUSH_FAILURE
index 94680521fbf919cf736b96ec1264dc9e50e9d92b..8895df12154047913fd7099fa4bdf487e3ade8d3 100644 (file)
@@ -7,7 +7,7 @@
 #include <asm/elf.h>
 
 
-Elf64_Half elf_core_extra_phdrs(void)
+Elf64_Half elf_core_extra_phdrs(struct coredump_params *cprm)
 {
        return GATE_EHDR->e_phnum;
 }
@@ -60,7 +60,7 @@ int elf_core_write_extra_data(struct coredump_params *cprm)
        return 1;
 }
 
-size_t elf_core_extra_data_size(void)
+size_t elf_core_extra_data_size(struct coredump_params *cprm)
 {
        const struct elf_phdr *const gate_phdrs =
                (const struct elf_phdr *) (GATE_ADDR + GATE_EHDR->e_phoff);
index 90f9d3399b2aafe62a495e93f0735898d4b28715..3418d32d4fc7d2d24a96563132d42f2bcc012d38 100644 (file)
@@ -10,8 +10,6 @@
 #define FTRACE_REGS_PLT_IDX    1
 #define NR_FTRACE_PLTS         2
 
-#define GRAPH_FAKE_OFFSET (sizeof(struct pt_regs) - offsetof(struct pt_regs, regs[1]))
-
 #ifdef CONFIG_FUNCTION_TRACER
 
 #define MCOUNT_INSN_SIZE 4             /* sizeof mcount call */
index c00e1512d4fa33e49bf4b6c4627460938510de75..7eedd83fd0d72127fc570cd87dc759f1d8b6b12f 100644 (file)
@@ -377,14 +377,6 @@ static inline bool unsigned_imm_check(unsigned long val, unsigned int bit)
        return val < (1UL << bit);
 }
 
-static inline unsigned long sign_extend(unsigned long val, unsigned int idx)
-{
-       if (!is_imm_negative(val, idx + 1))
-               return ((1UL << idx) - 1) & val;
-       else
-               return ~((1UL << idx) - 1) | val;
-}
-
 #define DEF_EMIT_REG0I26_FORMAT(NAME, OP)                              \
 static inline void emit_##NAME(union loongarch_instruction *insn,      \
                               int offset)                              \
@@ -401,6 +393,7 @@ static inline void emit_##NAME(union loongarch_instruction *insn,   \
 }
 
 DEF_EMIT_REG0I26_FORMAT(b, b_op)
+DEF_EMIT_REG0I26_FORMAT(bl, bl_op)
 
 #define DEF_EMIT_REG1I20_FORMAT(NAME, OP)                              \
 static inline void emit_##NAME(union loongarch_instruction *insn,      \
index f2b52b9ea93d2862d69e9e7f1075f6e1bcfee619..b9dce87afd2e058a3dd786ccbb513a3bbbff637c 100644 (file)
@@ -8,7 +8,9 @@
 #define _ASM_UNWIND_H
 
 #include <linux/sched.h>
+#include <linux/ftrace.h>
 
+#include <asm/ptrace.h>
 #include <asm/stacktrace.h>
 
 enum unwinder_type {
@@ -20,11 +22,13 @@ struct unwind_state {
        char type; /* UNWINDER_XXX */
        struct stack_info stack_info;
        struct task_struct *task;
-       bool first, error, is_ftrace;
+       bool first, error, reset;
        int graph_idx;
        unsigned long sp, pc, ra;
 };
 
+bool default_next_frame(struct unwind_state *state);
+
 void unwind_start(struct unwind_state *state,
                  struct task_struct *task, struct pt_regs *regs);
 bool unwind_next_frame(struct unwind_state *state);
@@ -40,4 +44,39 @@ static inline bool unwind_error(struct unwind_state *state)
        return state->error;
 }
 
+#define GRAPH_FAKE_OFFSET (sizeof(struct pt_regs) - offsetof(struct pt_regs, regs[1]))
+
+static inline unsigned long unwind_graph_addr(struct unwind_state *state,
+                                       unsigned long pc, unsigned long cfa)
+{
+       return ftrace_graph_ret_addr(state->task, &state->graph_idx,
+                                    pc, (unsigned long *)(cfa - GRAPH_FAKE_OFFSET));
+}
+
+static __always_inline void __unwind_start(struct unwind_state *state,
+                                       struct task_struct *task, struct pt_regs *regs)
+{
+       memset(state, 0, sizeof(*state));
+       if (regs) {
+               state->sp = regs->regs[3];
+               state->pc = regs->csr_era;
+               state->ra = regs->regs[1];
+       } else if (task && task != current) {
+               state->sp = thread_saved_fp(task);
+               state->pc = thread_saved_ra(task);
+               state->ra = 0;
+       } else {
+               state->sp = (unsigned long)__builtin_frame_address(0);
+               state->pc = (unsigned long)__builtin_return_address(0);
+               state->ra = 0;
+       }
+       state->task = task;
+       get_stack_info(state->sp, state->task, &state->stack_info);
+       state->pc = unwind_graph_addr(state, state->pc, state->sp);
+}
+
+static __always_inline unsigned long __unwind_get_return_address(struct unwind_state *state)
+{
+       return unwind_done(state) ? 0 : state->pc;
+}
 #endif /* _ASM_UNWIND_H */
index fcaa024a685ec6df4a983daa59dbf2f967b038af..c8cfbd562921d63fe2a15aa6fb8a6940ff356fd4 100644 (file)
@@ -8,7 +8,7 @@ extra-y         := vmlinux.lds
 obj-y          += head.o cpu-probe.o cacheinfo.o env.o setup.o entry.o genex.o \
                   traps.o irq.o idle.o process.o dma.o mem.o io.o reset.o switch.o \
                   elf.o syscall.o signal.o time.o topology.o inst.o ptrace.o vdso.o \
-                  alternative.o unaligned.o
+                  alternative.o unaligned.o unwind.o
 
 obj-$(CONFIG_ACPI)             += acpi.o
 obj-$(CONFIG_EFI)              += efi.o
index c5aebeac960b6e19908550185a49363c54f0768e..4ad13847e9626eab6dfa5022b0d51ff5a40459ad 100644 (file)
@@ -74,7 +74,7 @@ static void __init_or_module recompute_jump(union loongarch_instruction *buf,
        switch (src->reg0i26_format.opcode) {
        case b_op:
        case bl_op:
-               jump_addr = cur_pc + sign_extend((si_h << 16 | si_l) << 2, 27);
+               jump_addr = cur_pc + sign_extend64((si_h << 16 | si_l) << 2, 27);
                if (in_alt_jump(jump_addr, start, end))
                        return;
                offset = jump_addr - pc;
@@ -93,7 +93,7 @@ static void __init_or_module recompute_jump(union loongarch_instruction *buf,
                fallthrough;
        case beqz_op:
        case bnez_op:
-               jump_addr = cur_pc + sign_extend((si_h << 16 | si_l) << 2, 22);
+               jump_addr = cur_pc + sign_extend64((si_h << 16 | si_l) << 2, 22);
                if (in_alt_jump(jump_addr, start, end))
                        return;
                offset = jump_addr - pc;
@@ -112,7 +112,7 @@ static void __init_or_module recompute_jump(union loongarch_instruction *buf,
        case bge_op:
        case bltu_op:
        case bgeu_op:
-               jump_addr = cur_pc + sign_extend(si << 2, 17);
+               jump_addr = cur_pc + sign_extend64(si << 2, 17);
                if (in_alt_jump(jump_addr, start, end))
                        return;
                offset = jump_addr - pc;
index 255a09876ef28d1b10c67fd1b9f8ca4814f8aefc..3a3fce2d784611e1118e6a6d5dab1b509501dd9c 100644 (file)
@@ -94,7 +94,7 @@ static void cpu_probe_common(struct cpuinfo_loongarch *c)
        c->options = LOONGARCH_CPU_CPUCFG | LOONGARCH_CPU_CSR |
                     LOONGARCH_CPU_TLB | LOONGARCH_CPU_VINT | LOONGARCH_CPU_WATCH;
 
-       elf_hwcap |= HWCAP_LOONGARCH_CRC32;
+       elf_hwcap = HWCAP_LOONGARCH_CPUCFG | HWCAP_LOONGARCH_CRC32;
 
        config = read_cpucfg(LOONGARCH_CPUCFG1);
        if (config & CPUCFG1_UAL) {
index 75e5be807a0d5004feb5f1e6eb42f8bcc9efef5f..7e5c293ed89f70c4872d54dfcad422673dc65112 100644 (file)
@@ -67,14 +67,17 @@ SYM_FUNC_END(except_vec_cex)
        .macro  BUILD_HANDLER exception handler prep
        .align  5
        SYM_FUNC_START(handle_\exception)
+       666:
        BACKUP_T0T1
        SAVE_ALL
        build_prep_\prep
        move    a0, sp
        la.abs  t0, do_\handler
        jirl    ra, t0, 0
+       668:
        RESTORE_ALL_AND_RET
        SYM_FUNC_END(handle_\exception)
+       SYM_DATA(unwind_hint_\exception, .word 668b - 666b)
        .endm
 
        BUILD_HANDLER ade ade badv
index 512579d79b221ccb632cbff1c3140233e3a5dda0..badc590870423433495616c47e53022e4cf96f35 100644 (file)
@@ -58,7 +58,6 @@ u32 larch_insn_gen_nop(void)
 u32 larch_insn_gen_b(unsigned long pc, unsigned long dest)
 {
        long offset = dest - pc;
-       unsigned int immediate_l, immediate_h;
        union loongarch_instruction insn;
 
        if ((offset & 3) || offset < -SZ_128M || offset >= SZ_128M) {
@@ -66,15 +65,7 @@ u32 larch_insn_gen_b(unsigned long pc, unsigned long dest)
                return INSN_BREAK;
        }
 
-       offset >>= 2;
-
-       immediate_l = offset & 0xffff;
-       offset >>= 16;
-       immediate_h = offset & 0x3ff;
-
-       insn.reg0i26_format.opcode = b_op;
-       insn.reg0i26_format.immediate_l = immediate_l;
-       insn.reg0i26_format.immediate_h = immediate_h;
+       emit_b(&insn, offset >> 2);
 
        return insn.word;
 }
@@ -82,7 +73,6 @@ u32 larch_insn_gen_b(unsigned long pc, unsigned long dest)
 u32 larch_insn_gen_bl(unsigned long pc, unsigned long dest)
 {
        long offset = dest - pc;
-       unsigned int immediate_l, immediate_h;
        union loongarch_instruction insn;
 
        if ((offset & 3) || offset < -SZ_128M || offset >= SZ_128M) {
@@ -90,15 +80,7 @@ u32 larch_insn_gen_bl(unsigned long pc, unsigned long dest)
                return INSN_BREAK;
        }
 
-       offset >>= 2;
-
-       immediate_l = offset & 0xffff;
-       offset >>= 16;
-       immediate_h = offset & 0x3ff;
-
-       insn.reg0i26_format.opcode = bl_op;
-       insn.reg0i26_format.immediate_l = immediate_l;
-       insn.reg0i26_format.immediate_h = immediate_h;
+       emit_bl(&insn, offset >> 2);
 
        return insn.word;
 }
@@ -107,10 +89,7 @@ u32 larch_insn_gen_or(enum loongarch_gpr rd, enum loongarch_gpr rj, enum loongar
 {
        union loongarch_instruction insn;
 
-       insn.reg3_format.opcode = or_op;
-       insn.reg3_format.rd = rd;
-       insn.reg3_format.rj = rj;
-       insn.reg3_format.rk = rk;
+       emit_or(&insn, rd, rj, rk);
 
        return insn.word;
 }
@@ -124,9 +103,7 @@ u32 larch_insn_gen_lu12iw(enum loongarch_gpr rd, int imm)
 {
        union loongarch_instruction insn;
 
-       insn.reg1i20_format.opcode = lu12iw_op;
-       insn.reg1i20_format.rd = rd;
-       insn.reg1i20_format.immediate = imm;
+       emit_lu12iw(&insn, rd, imm);
 
        return insn.word;
 }
@@ -135,9 +112,7 @@ u32 larch_insn_gen_lu32id(enum loongarch_gpr rd, int imm)
 {
        union loongarch_instruction insn;
 
-       insn.reg1i20_format.opcode = lu32id_op;
-       insn.reg1i20_format.rd = rd;
-       insn.reg1i20_format.immediate = imm;
+       emit_lu32id(&insn, rd, imm);
 
        return insn.word;
 }
@@ -146,10 +121,7 @@ u32 larch_insn_gen_lu52id(enum loongarch_gpr rd, enum loongarch_gpr rj, int imm)
 {
        union loongarch_instruction insn;
 
-       insn.reg2i12_format.opcode = lu52id_op;
-       insn.reg2i12_format.rd = rd;
-       insn.reg2i12_format.rj = rj;
-       insn.reg2i12_format.immediate = imm;
+       emit_lu52id(&insn, rd, rj, imm);
 
        return insn.word;
 }
@@ -158,10 +130,7 @@ u32 larch_insn_gen_jirl(enum loongarch_gpr rd, enum loongarch_gpr rj, unsigned l
 {
        union loongarch_instruction insn;
 
-       insn.reg2i16_format.opcode = jirl_op;
-       insn.reg2i16_format.rd = rd;
-       insn.reg2i16_format.rj = rj;
-       insn.reg2i16_format.immediate = (dest - pc) >> 2;
+       emit_jirl(&insn, rj, rd, (dest - pc) >> 2);
 
        return insn.word;
 }
index c583b1ef1f44cefaada16d0d9c450b2019515ec9..edfd220a3737aadad190d43a2ecaf91fdcaf44e5 100644 (file)
@@ -191,20 +191,14 @@ out:
 
 unsigned long __get_wchan(struct task_struct *task)
 {
-       unsigned long pc;
+       unsigned long pc = 0;
        struct unwind_state state;
 
        if (!try_get_task_stack(task))
                return 0;
 
-       unwind_start(&state, task, NULL);
-       state.sp = thread_saved_fp(task);
-       get_stack_info(state.sp, state.task, &state.stack_info);
-       state.pc = thread_saved_ra(task);
-#ifdef CONFIG_UNWINDER_PROLOGUE
-       state.type = UNWINDER_PROLOGUE;
-#endif
-       for (; !unwind_done(&state); unwind_next_frame(&state)) {
+       for (unwind_start(&state, task, NULL);
+            !unwind_done(&state); unwind_next_frame(&state)) {
                pc = unwind_get_return_address(&state);
                if (!pc)
                        break;
index 7ea62faeeadb561362b0228d4937bedcb7466e51..c38a146a973b45beafc0e984bdff77631fbea0bb 100644 (file)
@@ -72,9 +72,6 @@ static void show_backtrace(struct task_struct *task, const struct pt_regs *regs,
        if (!task)
                task = current;
 
-       if (user_mode(regs))
-               state.type = UNWINDER_GUESS;
-
        printk("%sCall Trace:", loglvl);
        for (unwind_start(&state, task, pregs);
              !unwind_done(&state); unwind_next_frame(&state)) {
diff --git a/arch/loongarch/kernel/unwind.c b/arch/loongarch/kernel/unwind.c
new file mode 100644 (file)
index 0000000..a463d69
--- /dev/null
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022-2023 Loongson Technology Corporation Limited
+ */
+#include <linux/kernel.h>
+#include <linux/ftrace.h>
+
+#include <asm/unwind.h>
+
+bool default_next_frame(struct unwind_state *state)
+{
+       struct stack_info *info = &state->stack_info;
+       unsigned long addr;
+
+       if (unwind_done(state))
+               return false;
+
+       do {
+               for (state->sp += sizeof(unsigned long);
+                    state->sp < info->end; state->sp += sizeof(unsigned long)) {
+                       addr = *(unsigned long *)(state->sp);
+                       state->pc = unwind_graph_addr(state, addr, state->sp + 8);
+                       if (__kernel_text_address(state->pc))
+                               return true;
+               }
+
+               state->sp = info->next_sp;
+
+       } while (!get_stack_info(state->sp, state->task, info));
+
+       return false;
+}
index e2d2e4f3001f490a76762ad336fbcf696c4bbfe3..98379b7d41475b89bb85f1c4d32be29343612cc8 100644 (file)
@@ -2,37 +2,18 @@
 /*
  * Copyright (C) 2022 Loongson Technology Corporation Limited
  */
-#include <linux/kernel.h>
-#include <linux/ftrace.h>
-
 #include <asm/unwind.h>
 
 unsigned long unwind_get_return_address(struct unwind_state *state)
 {
-       if (unwind_done(state))
-               return 0;
-       else if (state->first)
-               return state->pc;
-
-       return *(unsigned long *)(state->sp);
+       return __unwind_get_return_address(state);
 }
 EXPORT_SYMBOL_GPL(unwind_get_return_address);
 
 void unwind_start(struct unwind_state *state, struct task_struct *task,
                    struct pt_regs *regs)
 {
-       memset(state, 0, sizeof(*state));
-
-       if (regs) {
-               state->sp = regs->regs[3];
-               state->pc = regs->csr_era;
-       }
-
-       state->task = task;
-       state->first = true;
-
-       get_stack_info(state->sp, state->task, &state->stack_info);
-
+       __unwind_start(state, task, regs);
        if (!unwind_done(state) && !__kernel_text_address(state->pc))
                unwind_next_frame(state);
 }
@@ -40,30 +21,6 @@ EXPORT_SYMBOL_GPL(unwind_start);
 
 bool unwind_next_frame(struct unwind_state *state)
 {
-       struct stack_info *info = &state->stack_info;
-       unsigned long addr;
-
-       if (unwind_done(state))
-               return false;
-
-       if (state->first)
-               state->first = false;
-
-       do {
-               for (state->sp += sizeof(unsigned long);
-                    state->sp < info->end;
-                    state->sp += sizeof(unsigned long)) {
-                       addr = *(unsigned long *)(state->sp);
-                       state->pc = ftrace_graph_ret_addr(state->task, &state->graph_idx,
-                                       addr, (unsigned long *)(state->sp - GRAPH_FAKE_OFFSET));
-                       if (__kernel_text_address(addr))
-                               return true;
-               }
-
-               state->sp = info->next_sp;
-
-       } while (!get_stack_info(state->sp, state->task, info));
-
-       return false;
+       return default_next_frame(state);
 }
 EXPORT_SYMBOL_GPL(unwind_next_frame);
index 0f8d1451ebb848d8a88f8a5c9405903a8d4c93c0..9095fde8e55d5c57177c83ab0548ab888c348f16 100644 (file)
 /*
  * Copyright (C) 2022 Loongson Technology Corporation Limited
  */
+#include <linux/cpumask.h>
 #include <linux/ftrace.h>
 #include <linux/kallsyms.h>
 
 #include <asm/inst.h>
+#include <asm/loongson.h>
 #include <asm/ptrace.h>
+#include <asm/setup.h>
 #include <asm/unwind.h>
 
-static inline void unwind_state_fixup(struct unwind_state *state)
-{
-#ifdef CONFIG_DYNAMIC_FTRACE
-       static unsigned long ftrace = (unsigned long)ftrace_call + 4;
-
-       if (state->pc == ftrace)
-               state->is_ftrace = true;
+extern const int unwind_hint_ade;
+extern const int unwind_hint_ale;
+extern const int unwind_hint_bp;
+extern const int unwind_hint_fpe;
+extern const int unwind_hint_fpu;
+extern const int unwind_hint_lsx;
+extern const int unwind_hint_lasx;
+extern const int unwind_hint_lbt;
+extern const int unwind_hint_ri;
+extern const int unwind_hint_watch;
+extern unsigned long eentry;
+#ifdef CONFIG_NUMA
+extern unsigned long pcpu_handlers[NR_CPUS];
 #endif
-}
 
-unsigned long unwind_get_return_address(struct unwind_state *state)
+static inline bool scan_handlers(unsigned long entry_offset)
 {
+       int idx, offset;
 
-       if (unwind_done(state))
-               return 0;
-       else if (state->type)
-               return state->pc;
-       else if (state->first)
-               return state->pc;
-
-       return *(unsigned long *)(state->sp);
+       if (entry_offset >= EXCCODE_INT_START * VECSIZE)
+               return false;
 
+       idx = entry_offset / VECSIZE;
+       offset = entry_offset % VECSIZE;
+       switch (idx) {
+       case EXCCODE_ADE:
+               return offset == unwind_hint_ade;
+       case EXCCODE_ALE:
+               return offset == unwind_hint_ale;
+       case EXCCODE_BP:
+               return offset == unwind_hint_bp;
+       case EXCCODE_FPE:
+               return offset == unwind_hint_fpe;
+       case EXCCODE_FPDIS:
+               return offset == unwind_hint_fpu;
+       case EXCCODE_LSXDIS:
+               return offset == unwind_hint_lsx;
+       case EXCCODE_LASXDIS:
+               return offset == unwind_hint_lasx;
+       case EXCCODE_BTDIS:
+               return offset == unwind_hint_lbt;
+       case EXCCODE_INE:
+               return offset == unwind_hint_ri;
+       case EXCCODE_WATCH:
+               return offset == unwind_hint_watch;
+       default:
+               return false;
+       }
 }
-EXPORT_SYMBOL_GPL(unwind_get_return_address);
 
-static bool unwind_by_guess(struct unwind_state *state)
+static inline bool fix_exception(unsigned long pc)
 {
-       struct stack_info *info = &state->stack_info;
-       unsigned long addr;
-
-       for (state->sp += sizeof(unsigned long);
-            state->sp < info->end;
-            state->sp += sizeof(unsigned long)) {
-               addr = *(unsigned long *)(state->sp);
-               state->pc = ftrace_graph_ret_addr(state->task, &state->graph_idx,
-                               addr, (unsigned long *)(state->sp - GRAPH_FAKE_OFFSET));
-               if (__kernel_text_address(addr))
+#ifdef CONFIG_NUMA
+       int cpu;
+
+       for_each_possible_cpu(cpu) {
+               if (!pcpu_handlers[cpu])
+                       continue;
+               if (scan_handlers(pc - pcpu_handlers[cpu]))
                        return true;
        }
+#endif
+       return scan_handlers(pc - eentry);
+}
 
+/*
+ * As we meet ftrace_regs_entry, reset first flag like first doing
+ * tracing. Prologue analysis will stop soon because PC is at entry.
+ */
+static inline bool fix_ftrace(unsigned long pc)
+{
+#ifdef CONFIG_DYNAMIC_FTRACE
+       return pc == (unsigned long)ftrace_call + LOONGARCH_INSN_SIZE;
+#else
        return false;
+#endif
 }
 
+static inline bool unwind_state_fixup(struct unwind_state *state)
+{
+       if (!fix_exception(state->pc) && !fix_ftrace(state->pc))
+               return false;
+
+       state->reset = true;
+       return true;
+}
+
+/*
+ * LoongArch function prologue is like follows,
+ *     [instructions not use stack var]
+ *     addi.d sp, sp, -imm
+ *     st.d   xx, sp, offset <- save callee saved regs and
+ *     st.d   yy, sp, offset    save ra if function is nest.
+ *     [others instructions]
+ */
 static bool unwind_by_prologue(struct unwind_state *state)
 {
        long frame_ra = -1;
        unsigned long frame_size = 0;
-       unsigned long size, offset, pc = state->pc;
+       unsigned long size, offset, pc;
        struct pt_regs *regs;
        struct stack_info *info = &state->stack_info;
        union loongarch_instruction *ip, *ip_end;
@@ -64,20 +119,21 @@ static bool unwind_by_prologue(struct unwind_state *state)
        if (state->sp >= info->end || state->sp < info->begin)
                return false;
 
-       if (state->is_ftrace) {
-               /*
-                * As we meet ftrace_regs_entry, reset first flag like first doing
-                * tracing. Prologue analysis will stop soon because PC is at entry.
-                */
+       if (state->reset) {
                regs = (struct pt_regs *)state->sp;
                state->first = true;
-               state->is_ftrace = false;
+               state->reset = false;
                state->pc = regs->csr_era;
                state->ra = regs->regs[1];
                state->sp = regs->regs[3];
                return true;
        }
 
+       /*
+        * When first is not set, the PC is a return address in the previous frame.
+        * We need to adjust its value in case overflow to the next symbol.
+        */
+       pc = state->pc - (state->first ? 0 : LOONGARCH_INSN_SIZE);
        if (!kallsyms_lookup_size_offset(pc, &size, &offset))
                return false;
 
@@ -93,6 +149,10 @@ static bool unwind_by_prologue(struct unwind_state *state)
                ip++;
        }
 
+       /*
+        * No stack-allocation instruction was found, so the PC may be in a
+        * leaf function. This is only reasonable when state->first is true;
+        * otherwise the prologue analysis is broken.
+        */
        if (!frame_size) {
                if (state->first)
                        goto first;
@@ -110,6 +170,7 @@ static bool unwind_by_prologue(struct unwind_state *state)
                ip++;
        }
 
+       /* Can't find save $ra action, PC may be in a leaf function, too. */
        if (frame_ra < 0) {
                if (state->first) {
                        state->sp = state->sp + frame_size;
@@ -118,88 +179,47 @@ static bool unwind_by_prologue(struct unwind_state *state)
                return false;
        }
 
-       if (state->first)
-               state->first = false;
-
        state->pc = *(unsigned long *)(state->sp + frame_ra);
        state->sp = state->sp + frame_size;
        goto out;
 
 first:
-       state->first = false;
-       if (state->pc == state->ra)
-               return false;
-
        state->pc = state->ra;
 
 out:
-       unwind_state_fixup(state);
-       return !!__kernel_text_address(state->pc);
-}
-
-void unwind_start(struct unwind_state *state, struct task_struct *task,
-                   struct pt_regs *regs)
-{
-       memset(state, 0, sizeof(*state));
-
-       if (regs &&  __kernel_text_address(regs->csr_era)) {
-               state->pc = regs->csr_era;
-               state->sp = regs->regs[3];
-               state->ra = regs->regs[1];
-               state->type = UNWINDER_PROLOGUE;
-       }
-
-       state->task = task;
-       state->first = true;
-
-       get_stack_info(state->sp, state->task, &state->stack_info);
-
-       if (!unwind_done(state) && !__kernel_text_address(state->pc))
-               unwind_next_frame(state);
+       state->first = false;
+       return unwind_state_fixup(state) || __kernel_text_address(state->pc);
 }
-EXPORT_SYMBOL_GPL(unwind_start);
 
-bool unwind_next_frame(struct unwind_state *state)
+static bool next_frame(struct unwind_state *state)
 {
-       struct stack_info *info = &state->stack_info;
-       struct pt_regs *regs;
        unsigned long pc;
+       struct pt_regs *regs;
+       struct stack_info *info = &state->stack_info;
 
        if (unwind_done(state))
                return false;
 
        do {
-               switch (state->type) {
-               case UNWINDER_GUESS:
-                       state->first = false;
-                       if (unwind_by_guess(state))
-                               return true;
-                       break;
+               if (unwind_by_prologue(state)) {
+                       state->pc = unwind_graph_addr(state, state->pc, state->sp);
+                       return true;
+               }
+
+               if (info->type == STACK_TYPE_IRQ && info->end == state->sp) {
+                       regs = (struct pt_regs *)info->next_sp;
+                       pc = regs->csr_era;
+
+                       if (user_mode(regs) || !__kernel_text_address(pc))
+                               return false;
+
+                       state->first = true;
+                       state->pc = pc;
+                       state->ra = regs->regs[1];
+                       state->sp = regs->regs[3];
+                       get_stack_info(state->sp, state->task, info);
 
-               case UNWINDER_PROLOGUE:
-                       if (unwind_by_prologue(state)) {
-                               state->pc = ftrace_graph_ret_addr(state->task, &state->graph_idx,
-                                               state->pc, (unsigned long *)(state->sp - GRAPH_FAKE_OFFSET));
-                               return true;
-                       }
-
-                       if (info->type == STACK_TYPE_IRQ &&
-                               info->end == state->sp) {
-                               regs = (struct pt_regs *)info->next_sp;
-                               pc = regs->csr_era;
-
-                               if (user_mode(regs) || !__kernel_text_address(pc))
-                                       return false;
-
-                               state->first = true;
-                               state->ra = regs->regs[1];
-                               state->sp = regs->regs[3];
-                               state->pc = ftrace_graph_ret_addr(state->task, &state->graph_idx,
-                                               pc, (unsigned long *)(state->sp - GRAPH_FAKE_OFFSET));
-                               get_stack_info(state->sp, state->task, info);
-
-                               return true;
-                       }
+                       return true;
                }
 
                state->sp = info->next_sp;
@@ -208,4 +228,36 @@ bool unwind_next_frame(struct unwind_state *state)
 
        return false;
 }
+
+unsigned long unwind_get_return_address(struct unwind_state *state)
+{
+       return __unwind_get_return_address(state);
+}
+EXPORT_SYMBOL_GPL(unwind_get_return_address);
+
+void unwind_start(struct unwind_state *state, struct task_struct *task,
+                   struct pt_regs *regs)
+{
+       __unwind_start(state, task, regs);
+       state->type = UNWINDER_PROLOGUE;
+       state->first = true;
+
+       /*
+        * If the current PC is not a kernel text address, we cannot find its
+        * relative symbol, so prologue analysis would break. Luckily, we can
+        * fall back to default_next_frame() in that case.
+        */
+       if (!__kernel_text_address(state->pc)) {
+               state->type = UNWINDER_GUESS;
+               if (!unwind_done(state))
+                       unwind_next_frame(state);
+       }
+}
+EXPORT_SYMBOL_GPL(unwind_start);
+
+bool unwind_next_frame(struct unwind_state *state)
+{
+       return state->type == UNWINDER_PROLOGUE ?
+                       next_frame(state) : default_next_frame(state);
+}
 EXPORT_SYMBOL_GPL(unwind_next_frame);
index da3681f131c8d8a722b7f479775f7a48be47fc0a..8bad6b0cff59b96b2ebdda34214fdc1e4758e27e 100644 (file)
@@ -251,7 +251,7 @@ static void output_pgtable_bits_defines(void)
 }
 
 #ifdef CONFIG_NUMA
-static unsigned long pcpu_handlers[NR_CPUS];
+unsigned long pcpu_handlers[NR_CPUS];
 #endif
 extern long exception_handlers[VECSIZE * 128 / sizeof(long)];
 
index 01c132bc33d540fd0d7aad9f205db23f50763986..4d06de77d92a69a80af6a1e996b9d8a47a2e1f0d 100644 (file)
@@ -64,7 +64,7 @@ void __init plat_mem_setup(void)
        dtb = get_fdt();
        __dt_setup_arch(dtb);
 
-       if (!early_init_dt_scan_memory())
+       if (early_init_dt_scan_memory())
                return;
 
        if (soc_info.mem_detect)
index 74e17e134387d6b97b86b1beb047ff1befc723ee..27714dc2f04a541a739c4dc97bcf2b4fe78265d7 100644 (file)
                interrupts = <16 2 1 9>;
        };
 };
+
+&fman0_rx_0x08 {
+       /delete-property/ fsl,fman-10g-port;
+};
+
+&fman0_tx_0x28 {
+       /delete-property/ fsl,fman-10g-port;
+};
+
+&fman0_rx_0x09 {
+       /delete-property/ fsl,fman-10g-port;
+};
+
+&fman0_tx_0x29 {
+       /delete-property/ fsl,fman-10g-port;
+};
index af04cea82b941f8174e5e9a5c7fb01e4b9ac55b9..352d7de24018fbd4677e663a62afae46b1a52743 100755 (executable)
@@ -210,6 +210,10 @@ ld_version()
        gsub(".*version ", "");
        gsub("-.*", "");
        split($1,a, ".");
+       if( length(a[3]) == "8" )
+               # a[3] is probably a date of format yyyymmdd used for release snapshots. We
+               # can assume it to be zero as it does not signify a new version as such.
+               a[3] = 0;
        print a[1]*100000000 + a[2]*1000000 + a[3]*10000;
        exit
     }'
index 4f897993b7107537c25483df2d5cfd8613a4bd81..699a88584ae16f194c7c7ccf4f8212af9977a876 100644 (file)
@@ -137,7 +137,7 @@ struct imc_pmu {
  * are inited.
  */
 struct imc_pmu_ref {
-       struct mutex lock;
+       spinlock_t lock;
        unsigned int id;
        int refc;
 };
index 8c3862b4c259d6a2b89d40cfd712ce4d2f974179..958e77a24f85bb2ce8a25f6cf204a27e70ffd883 100644 (file)
@@ -8,6 +8,7 @@
 #define BSS_FIRST_SECTIONS *(.bss.prominit)
 #define EMITS_PT_NOTE
 #define RO_EXCEPTION_TABLE_ALIGN       0
+#define RUNTIME_DISCARD_EXIT
 
 #define SOFT_MASK_TABLE(align)                                         \
        . = ALIGN(align);                                               \
@@ -410,9 +411,12 @@ SECTIONS
        DISCARDS
        /DISCARD/ : {
                *(*.EMB.apuinfo)
-               *(.glink .iplt .plt .rela* .comment)
+               *(.glink .iplt .plt)
                *(.gnu.version*)
                *(.gnu.attributes)
                *(.eh_frame)
+#ifndef CONFIG_RELOCATABLE
+               *(.rela*)
+#endif
        }
 }
index 80a148c57de8137aba48022373d3475a567c219f..44a35ed4f68605490bb2950d1ba0f0724e2f8e35 100644 (file)
@@ -1012,7 +1012,7 @@ static void __init hash_init_partition_table(phys_addr_t hash_table,
 
 void hpt_clear_stress(void);
 static struct timer_list stress_hpt_timer;
-void stress_hpt_timer_fn(struct timer_list *timer)
+static void stress_hpt_timer_fn(struct timer_list *timer)
 {
        int next_cpu;
 
index d517aba94d1bc4cb8e176700e2b29894a71a19fa..100e97daf76ba140de318eb029cc9f69cba2df3d 100644 (file)
@@ -14,6 +14,7 @@
 #include <asm/cputhreads.h>
 #include <asm/smp.h>
 #include <linux/string.h>
+#include <linux/spinlock.h>
 
 /* Nest IMC data structures and variables */
 
@@ -21,7 +22,7 @@
  * Used to avoid races in counting the nest-pmu units during hotplug
  * register and unregister
  */
-static DEFINE_MUTEX(nest_init_lock);
+static DEFINE_SPINLOCK(nest_init_lock);
 static DEFINE_PER_CPU(struct imc_pmu_ref *, local_nest_imc_refc);
 static struct imc_pmu **per_nest_pmu_arr;
 static cpumask_t nest_imc_cpumask;
@@ -50,7 +51,7 @@ static int trace_imc_mem_size;
  * core and trace-imc
  */
 static struct imc_pmu_ref imc_global_refc = {
-       .lock = __MUTEX_INITIALIZER(imc_global_refc.lock),
+       .lock = __SPIN_LOCK_INITIALIZER(imc_global_refc.lock),
        .id = 0,
        .refc = 0,
 };
@@ -400,7 +401,7 @@ static int ppc_nest_imc_cpu_offline(unsigned int cpu)
                                       get_hard_smp_processor_id(cpu));
                /*
                 * If this is the last cpu in this chip then, skip the reference
-                * count mutex lock and make the reference count on this chip zero.
+                * count lock and make the reference count on this chip zero.
                 */
                ref = get_nest_pmu_ref(cpu);
                if (!ref)
@@ -462,15 +463,15 @@ static void nest_imc_counters_release(struct perf_event *event)
        /*
         * See if we need to disable the nest PMU.
         * If no events are currently in use, then we have to take a
-        * mutex to ensure that we don't race with another task doing
+        * lock to ensure that we don't race with another task doing
         * enable or disable the nest counters.
         */
        ref = get_nest_pmu_ref(event->cpu);
        if (!ref)
                return;
 
-       /* Take the mutex lock for this node and then decrement the reference count */
-       mutex_lock(&ref->lock);
+       /* Take the lock for this node and then decrement the reference count */
+       spin_lock(&ref->lock);
        if (ref->refc == 0) {
                /*
                 * The scenario where this is true is, when perf session is
@@ -482,7 +483,7 @@ static void nest_imc_counters_release(struct perf_event *event)
                 * an OPAL call to disable the engine in that node.
                 *
                 */
-               mutex_unlock(&ref->lock);
+               spin_unlock(&ref->lock);
                return;
        }
        ref->refc--;
@@ -490,7 +491,7 @@ static void nest_imc_counters_release(struct perf_event *event)
                rc = opal_imc_counters_stop(OPAL_IMC_COUNTERS_NEST,
                                            get_hard_smp_processor_id(event->cpu));
                if (rc) {
-                       mutex_unlock(&ref->lock);
+                       spin_unlock(&ref->lock);
                        pr_err("nest-imc: Unable to stop the counters for core %d\n", node_id);
                        return;
                }
@@ -498,7 +499,7 @@ static void nest_imc_counters_release(struct perf_event *event)
                WARN(1, "nest-imc: Invalid event reference count\n");
                ref->refc = 0;
        }
-       mutex_unlock(&ref->lock);
+       spin_unlock(&ref->lock);
 }
 
 static int nest_imc_event_init(struct perf_event *event)
@@ -557,26 +558,25 @@ static int nest_imc_event_init(struct perf_event *event)
 
        /*
         * Get the imc_pmu_ref struct for this node.
-        * Take the mutex lock and then increment the count of nest pmu events
-        * inited.
+        * Take the lock and then increment the count of nest pmu events inited.
         */
        ref = get_nest_pmu_ref(event->cpu);
        if (!ref)
                return -EINVAL;
 
-       mutex_lock(&ref->lock);
+       spin_lock(&ref->lock);
        if (ref->refc == 0) {
                rc = opal_imc_counters_start(OPAL_IMC_COUNTERS_NEST,
                                             get_hard_smp_processor_id(event->cpu));
                if (rc) {
-                       mutex_unlock(&ref->lock);
+                       spin_unlock(&ref->lock);
                        pr_err("nest-imc: Unable to start the counters for node %d\n",
                                                                        node_id);
                        return rc;
                }
        }
        ++ref->refc;
-       mutex_unlock(&ref->lock);
+       spin_unlock(&ref->lock);
 
        event->destroy = nest_imc_counters_release;
        return 0;
@@ -612,9 +612,8 @@ static int core_imc_mem_init(int cpu, int size)
                return -ENOMEM;
        mem_info->vbase = page_address(page);
 
-       /* Init the mutex */
        core_imc_refc[core_id].id = core_id;
-       mutex_init(&core_imc_refc[core_id].lock);
+       spin_lock_init(&core_imc_refc[core_id].lock);
 
        rc = opal_imc_counters_init(OPAL_IMC_COUNTERS_CORE,
                                __pa((void *)mem_info->vbase),
@@ -703,9 +702,8 @@ static int ppc_core_imc_cpu_offline(unsigned int cpu)
                perf_pmu_migrate_context(&core_imc_pmu->pmu, cpu, ncpu);
        } else {
                /*
-                * If this is the last cpu in this core then, skip taking refernce
-                * count mutex lock for this core and directly zero "refc" for
-                * this core.
+                * If this is the last cpu in this core then skip taking reference
+                * count lock for this core and directly zero "refc" for this core.
                 */
                opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE,
                                       get_hard_smp_processor_id(cpu));
@@ -720,11 +718,11 @@ static int ppc_core_imc_cpu_offline(unsigned int cpu)
                 * last cpu in this core and core-imc event running
                 * in this cpu.
                 */
-               mutex_lock(&imc_global_refc.lock);
+               spin_lock(&imc_global_refc.lock);
                if (imc_global_refc.id == IMC_DOMAIN_CORE)
                        imc_global_refc.refc--;
 
-               mutex_unlock(&imc_global_refc.lock);
+               spin_unlock(&imc_global_refc.lock);
        }
        return 0;
 }
@@ -739,7 +737,7 @@ static int core_imc_pmu_cpumask_init(void)
 
 static void reset_global_refc(struct perf_event *event)
 {
-               mutex_lock(&imc_global_refc.lock);
+               spin_lock(&imc_global_refc.lock);
                imc_global_refc.refc--;
 
                /*
@@ -751,7 +749,7 @@ static void reset_global_refc(struct perf_event *event)
                        imc_global_refc.refc = 0;
                        imc_global_refc.id = 0;
                }
-               mutex_unlock(&imc_global_refc.lock);
+               spin_unlock(&imc_global_refc.lock);
 }
 
 static void core_imc_counters_release(struct perf_event *event)
@@ -764,17 +762,17 @@ static void core_imc_counters_release(struct perf_event *event)
        /*
         * See if we need to disable the IMC PMU.
         * If no events are currently in use, then we have to take a
-        * mutex to ensure that we don't race with another task doing
+        * lock to ensure that we don't race with another task doing
         * enable or disable the core counters.
         */
        core_id = event->cpu / threads_per_core;
 
-       /* Take the mutex lock and decrement the refernce count for this core */
+       /* Take the lock and decrement the reference count for this core */
        ref = &core_imc_refc[core_id];
        if (!ref)
                return;
 
-       mutex_lock(&ref->lock);
+       spin_lock(&ref->lock);
        if (ref->refc == 0) {
                /*
                 * The scenario where this is true is, when perf session is
@@ -786,7 +784,7 @@ static void core_imc_counters_release(struct perf_event *event)
                 * an OPAL call to disable the engine in that core.
                 *
                 */
-               mutex_unlock(&ref->lock);
+               spin_unlock(&ref->lock);
                return;
        }
        ref->refc--;
@@ -794,7 +792,7 @@ static void core_imc_counters_release(struct perf_event *event)
                rc = opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE,
                                            get_hard_smp_processor_id(event->cpu));
                if (rc) {
-                       mutex_unlock(&ref->lock);
+                       spin_unlock(&ref->lock);
                        pr_err("IMC: Unable to stop the counters for core %d\n", core_id);
                        return;
                }
@@ -802,7 +800,7 @@ static void core_imc_counters_release(struct perf_event *event)
                WARN(1, "core-imc: Invalid event reference count\n");
                ref->refc = 0;
        }
-       mutex_unlock(&ref->lock);
+       spin_unlock(&ref->lock);
 
        reset_global_refc(event);
 }
@@ -840,7 +838,6 @@ static int core_imc_event_init(struct perf_event *event)
        if ((!pcmi->vbase))
                return -ENODEV;
 
-       /* Get the core_imc mutex for this core */
        ref = &core_imc_refc[core_id];
        if (!ref)
                return -EINVAL;
@@ -848,22 +845,22 @@ static int core_imc_event_init(struct perf_event *event)
        /*
         * Core pmu units are enabled only when it is used.
         * See if this is triggered for the first time.
-        * If yes, take the mutex lock and enable the core counters.
+        * If yes, take the lock and enable the core counters.
         * If not, just increment the count in core_imc_refc struct.
         */
-       mutex_lock(&ref->lock);
+       spin_lock(&ref->lock);
        if (ref->refc == 0) {
                rc = opal_imc_counters_start(OPAL_IMC_COUNTERS_CORE,
                                             get_hard_smp_processor_id(event->cpu));
                if (rc) {
-                       mutex_unlock(&ref->lock);
+                       spin_unlock(&ref->lock);
                        pr_err("core-imc: Unable to start the counters for core %d\n",
                                                                        core_id);
                        return rc;
                }
        }
        ++ref->refc;
-       mutex_unlock(&ref->lock);
+       spin_unlock(&ref->lock);
 
        /*
         * Since the system can run either in accumulation or trace-mode
@@ -874,7 +871,7 @@ static int core_imc_event_init(struct perf_event *event)
         * to know whether any other trace/thread imc
         * events are running.
         */
-       mutex_lock(&imc_global_refc.lock);
+       spin_lock(&imc_global_refc.lock);
        if (imc_global_refc.id == 0 || imc_global_refc.id == IMC_DOMAIN_CORE) {
                /*
                 * No other trace/thread imc events are running in
@@ -883,10 +880,10 @@ static int core_imc_event_init(struct perf_event *event)
                imc_global_refc.id = IMC_DOMAIN_CORE;
                imc_global_refc.refc++;
        } else {
-               mutex_unlock(&imc_global_refc.lock);
+               spin_unlock(&imc_global_refc.lock);
                return -EBUSY;
        }
-       mutex_unlock(&imc_global_refc.lock);
+       spin_unlock(&imc_global_refc.lock);
 
        event->hw.event_base = (u64)pcmi->vbase + (config & IMC_EVENT_OFFSET_MASK);
        event->destroy = core_imc_counters_release;
@@ -958,10 +955,10 @@ static int ppc_thread_imc_cpu_offline(unsigned int cpu)
        mtspr(SPRN_LDBAR, (mfspr(SPRN_LDBAR) & (~(1UL << 63))));
 
        /* Reduce the refc if thread-imc event running on this cpu */
-       mutex_lock(&imc_global_refc.lock);
+       spin_lock(&imc_global_refc.lock);
        if (imc_global_refc.id == IMC_DOMAIN_THREAD)
                imc_global_refc.refc--;
-       mutex_unlock(&imc_global_refc.lock);
+       spin_unlock(&imc_global_refc.lock);
 
        return 0;
 }
@@ -1001,7 +998,7 @@ static int thread_imc_event_init(struct perf_event *event)
        if (!target)
                return -EINVAL;
 
-       mutex_lock(&imc_global_refc.lock);
+       spin_lock(&imc_global_refc.lock);
        /*
         * Check if any other trace/core imc events are running in the
         * system, if not set the global id to thread-imc.
@@ -1010,10 +1007,10 @@ static int thread_imc_event_init(struct perf_event *event)
                imc_global_refc.id = IMC_DOMAIN_THREAD;
                imc_global_refc.refc++;
        } else {
-               mutex_unlock(&imc_global_refc.lock);
+               spin_unlock(&imc_global_refc.lock);
                return -EBUSY;
        }
-       mutex_unlock(&imc_global_refc.lock);
+       spin_unlock(&imc_global_refc.lock);
 
        event->pmu->task_ctx_nr = perf_sw_context;
        event->destroy = reset_global_refc;
@@ -1135,25 +1132,25 @@ static int thread_imc_event_add(struct perf_event *event, int flags)
        /*
         * imc pmus are enabled only when it is used.
         * See if this is triggered for the first time.
-        * If yes, take the mutex lock and enable the counters.
+        * If yes, take the lock and enable the counters.
         * If not, just increment the count in ref count struct.
         */
        ref = &core_imc_refc[core_id];
        if (!ref)
                return -EINVAL;
 
-       mutex_lock(&ref->lock);
+       spin_lock(&ref->lock);
        if (ref->refc == 0) {
                if (opal_imc_counters_start(OPAL_IMC_COUNTERS_CORE,
                    get_hard_smp_processor_id(smp_processor_id()))) {
-                       mutex_unlock(&ref->lock);
+                       spin_unlock(&ref->lock);
                        pr_err("thread-imc: Unable to start the counter\
                                for core %d\n", core_id);
                        return -EINVAL;
                }
        }
        ++ref->refc;
-       mutex_unlock(&ref->lock);
+       spin_unlock(&ref->lock);
        return 0;
 }
 
@@ -1170,12 +1167,12 @@ static void thread_imc_event_del(struct perf_event *event, int flags)
                return;
        }
 
-       mutex_lock(&ref->lock);
+       spin_lock(&ref->lock);
        ref->refc--;
        if (ref->refc == 0) {
                if (opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE,
                    get_hard_smp_processor_id(smp_processor_id()))) {
-                       mutex_unlock(&ref->lock);
+                       spin_unlock(&ref->lock);
                        pr_err("thread-imc: Unable to stop the counters\
                                for core %d\n", core_id);
                        return;
@@ -1183,7 +1180,7 @@ static void thread_imc_event_del(struct perf_event *event, int flags)
        } else if (ref->refc < 0) {
                ref->refc = 0;
        }
-       mutex_unlock(&ref->lock);
+       spin_unlock(&ref->lock);
 
        /* Set bit 0 of LDBAR to zero, to stop posting updates to memory */
        mtspr(SPRN_LDBAR, (mfspr(SPRN_LDBAR) & (~(1UL << 63))));
@@ -1224,9 +1221,8 @@ static int trace_imc_mem_alloc(int cpu_id, int size)
                }
        }
 
-       /* Init the mutex, if not already */
        trace_imc_refc[core_id].id = core_id;
-       mutex_init(&trace_imc_refc[core_id].lock);
+       spin_lock_init(&trace_imc_refc[core_id].lock);
 
        mtspr(SPRN_LDBAR, 0);
        return 0;
@@ -1246,10 +1242,10 @@ static int ppc_trace_imc_cpu_offline(unsigned int cpu)
         * Reduce the refc if any trace-imc event running
         * on this cpu.
         */
-       mutex_lock(&imc_global_refc.lock);
+       spin_lock(&imc_global_refc.lock);
        if (imc_global_refc.id == IMC_DOMAIN_TRACE)
                imc_global_refc.refc--;
-       mutex_unlock(&imc_global_refc.lock);
+       spin_unlock(&imc_global_refc.lock);
 
        return 0;
 }
@@ -1371,17 +1367,17 @@ static int trace_imc_event_add(struct perf_event *event, int flags)
        }
 
        mtspr(SPRN_LDBAR, ldbar_value);
-       mutex_lock(&ref->lock);
+       spin_lock(&ref->lock);
        if (ref->refc == 0) {
                if (opal_imc_counters_start(OPAL_IMC_COUNTERS_TRACE,
                                get_hard_smp_processor_id(smp_processor_id()))) {
-                       mutex_unlock(&ref->lock);
+                       spin_unlock(&ref->lock);
                        pr_err("trace-imc: Unable to start the counters for core %d\n", core_id);
                        return -EINVAL;
                }
        }
        ++ref->refc;
-       mutex_unlock(&ref->lock);
+       spin_unlock(&ref->lock);
        return 0;
 }
 
@@ -1414,19 +1410,19 @@ static void trace_imc_event_del(struct perf_event *event, int flags)
                return;
        }
 
-       mutex_lock(&ref->lock);
+       spin_lock(&ref->lock);
        ref->refc--;
        if (ref->refc == 0) {
                if (opal_imc_counters_stop(OPAL_IMC_COUNTERS_TRACE,
                                get_hard_smp_processor_id(smp_processor_id()))) {
-                       mutex_unlock(&ref->lock);
+                       spin_unlock(&ref->lock);
                        pr_err("trace-imc: Unable to stop the counters for core %d\n", core_id);
                        return;
                }
        } else if (ref->refc < 0) {
                ref->refc = 0;
        }
-       mutex_unlock(&ref->lock);
+       spin_unlock(&ref->lock);
 
        trace_imc_event_stop(event, flags);
 }
@@ -1448,7 +1444,7 @@ static int trace_imc_event_init(struct perf_event *event)
         * no other thread is running any core/thread imc
         * events
         */
-       mutex_lock(&imc_global_refc.lock);
+       spin_lock(&imc_global_refc.lock);
        if (imc_global_refc.id == 0 || imc_global_refc.id == IMC_DOMAIN_TRACE) {
                /*
                 * No core/thread imc events are running in the
@@ -1457,10 +1453,10 @@ static int trace_imc_event_init(struct perf_event *event)
                imc_global_refc.id = IMC_DOMAIN_TRACE;
                imc_global_refc.refc++;
        } else {
-               mutex_unlock(&imc_global_refc.lock);
+               spin_unlock(&imc_global_refc.lock);
                return -EBUSY;
        }
-       mutex_unlock(&imc_global_refc.lock);
+       spin_unlock(&imc_global_refc.lock);
 
        event->hw.idx = -1;
 
@@ -1533,10 +1529,10 @@ static int init_nest_pmu_ref(void)
        i = 0;
        for_each_node(nid) {
                /*
-                * Mutex lock to avoid races while tracking the number of
+                * Take the lock to avoid races while tracking the number of
                 * sessions using the chip's nest pmu units.
                 */
-               mutex_init(&nest_imc_refc[i].lock);
+               spin_lock_init(&nest_imc_refc[i].lock);
 
                /*
                 * Loop to init the "id" with the node_id. Variable "i" initialized to
@@ -1633,7 +1629,7 @@ static void imc_common_mem_free(struct imc_pmu *pmu_ptr)
 static void imc_common_cpuhp_mem_free(struct imc_pmu *pmu_ptr)
 {
        if (pmu_ptr->domain == IMC_DOMAIN_NEST) {
-               mutex_lock(&nest_init_lock);
+               spin_lock(&nest_init_lock);
                if (nest_pmus == 1) {
                        cpuhp_remove_state(CPUHP_AP_PERF_POWERPC_NEST_IMC_ONLINE);
                        kfree(nest_imc_refc);
@@ -1643,7 +1639,7 @@ static void imc_common_cpuhp_mem_free(struct imc_pmu *pmu_ptr)
 
                if (nest_pmus > 0)
                        nest_pmus--;
-               mutex_unlock(&nest_init_lock);
+               spin_unlock(&nest_init_lock);
        }
 
        /* Free core_imc memory */
@@ -1800,11 +1796,11 @@ int init_imc_pmu(struct device_node *parent, struct imc_pmu *pmu_ptr, int pmu_id
                * rest. To handle the cpuhotplug callback unregister, we track
                * the number of nest pmus in "nest_pmus".
                */
-               mutex_lock(&nest_init_lock);
+               spin_lock(&nest_init_lock);
                if (nest_pmus == 0) {
                        ret = init_nest_pmu_ref();
                        if (ret) {
-                               mutex_unlock(&nest_init_lock);
+                               spin_unlock(&nest_init_lock);
                                kfree(per_nest_pmu_arr);
                                per_nest_pmu_arr = NULL;
                                goto err_free_mem;
@@ -1812,7 +1808,7 @@ int init_imc_pmu(struct device_node *parent, struct imc_pmu *pmu_ptr, int pmu_id
                        /* Register for cpu hotplug notification. */
                        ret = nest_pmu_cpumask_init();
                        if (ret) {
-                               mutex_unlock(&nest_init_lock);
+                               spin_unlock(&nest_init_lock);
                                kfree(nest_imc_refc);
                                kfree(per_nest_pmu_arr);
                                per_nest_pmu_arr = NULL;
@@ -1820,7 +1816,7 @@ int init_imc_pmu(struct device_node *parent, struct imc_pmu *pmu_ptr, int pmu_id
                        }
                }
                nest_pmus++;
-               mutex_unlock(&nest_init_lock);
+               spin_unlock(&nest_init_lock);
                break;
        case IMC_DOMAIN_CORE:
                ret = core_imc_pmu_cpumask_init();
index 43bed6c0a84fe81b10189f53842d739348b25f34..5235fd1c9cb6773d2f23562fe999ffa0ca21dc6b 100644 (file)
                        bus-range = <0x0 0xff>;
                        ranges = <0x81000000  0x0 0x60080000  0x0 0x60080000 0x0 0x10000>,      /* I/O */
                                 <0x82000000  0x0 0x60090000  0x0 0x60090000 0x0 0xff70000>,    /* mem */
-                                <0x82000000  0x0 0x70000000  0x0 0x70000000 0x0 0x1000000>,    /* mem */
+                                <0x82000000  0x0 0x70000000  0x0 0x70000000 0x0 0x10000000>,    /* mem */
                                 <0xc3000000 0x20 0x00000000 0x20 0x00000000 0x20 0x00000000>;  /* mem prefetchable */
                        num-lanes = <0x8>;
                        interrupts = <56>, <57>, <58>, <59>, <60>, <61>, <62>, <63>, <64>;
index 855450bed9f52c2b7b06360dc5976b5519e5592b..ec0cab9fbddd0da98cb415af2732a4ede083886b 100644 (file)
@@ -165,7 +165,7 @@ do {                                                                \
        might_fault();                                          \
        access_ok(__p, sizeof(*__p)) ?          \
                __get_user((x), __p) :                          \
-               ((x) = 0, -EFAULT);                             \
+               ((x) = (__force __typeof__(x))0, -EFAULT);      \
 })
 
 #define __put_user_asm(insn, x, ptr, err)                      \
index cb6ff7dccb92e18bde5f9b517e42ec851a98b986..de8474146a9b6e71e025fcb79daef39e1cd8d2e7 100644 (file)
@@ -31,9 +31,9 @@ __RISCV_INSN_FUNCS(fence,     0x7f, 0x0f);
        } while (0)
 
 __RISCV_INSN_FUNCS(c_j,                0xe003, 0xa001);
-__RISCV_INSN_FUNCS(c_jr,       0xf007, 0x8002);
+__RISCV_INSN_FUNCS(c_jr,       0xf07f, 0x8002);
 __RISCV_INSN_FUNCS(c_jal,      0xe003, 0x2001);
-__RISCV_INSN_FUNCS(c_jalr,     0xf007, 0x9002);
+__RISCV_INSN_FUNCS(c_jalr,     0xf07f, 0x9002);
 __RISCV_INSN_FUNCS(c_beqz,     0xe003, 0xc001);
 __RISCV_INSN_FUNCS(c_bnez,     0xe003, 0xe001);
 __RISCV_INSN_FUNCS(c_ebreak,   0xffff, 0x9002);
index e27c2140d620689eb8270695059f6815b38b981e..8dcd7af2911a07c36d792aa0366314579b512e28 100644 (file)
@@ -23,9 +23,9 @@
 #define memmove memmove
 #define memzero(s, n) memset((s), 0, (n))
 
-#ifdef CONFIG_KERNEL_BZIP2
+#if defined(CONFIG_KERNEL_BZIP2)
 #define BOOT_HEAP_SIZE 0x400000
-#elif CONFIG_KERNEL_ZSTD
+#elif defined(CONFIG_KERNEL_ZSTD)
 #define BOOT_HEAP_SIZE 0x30000
 #else
 #define BOOT_HEAP_SIZE 0x10000
index a7b4e1d82758072cb23cd5e988bd8d492d5a4490..74b35ec2ad28a800f25b3b211e219c9bf32ae2df 100644 (file)
@@ -190,7 +190,6 @@ CONFIG_NFT_CT=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
 CONFIG_NFT_NAT=m
-CONFIG_NFT_OBJREF=m
 CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
 CONFIG_NFT_HASH=m
@@ -569,6 +568,7 @@ CONFIG_INPUT_EVDEV=y
 # CONFIG_INPUT_MOUSE is not set
 # CONFIG_SERIO is not set
 CONFIG_LEGACY_PTY_COUNT=0
+# CONFIG_LEGACY_TIOCSTI is not set
 CONFIG_VIRTIO_CONSOLE=m
 CONFIG_HW_RANDOM_VIRTIO=m
 CONFIG_HANGCHECK_TIMER=m
@@ -660,6 +660,7 @@ CONFIG_CONFIGFS_FS=m
 CONFIG_ECRYPT_FS=m
 CONFIG_CRAMFS=m
 CONFIG_SQUASHFS=m
+CONFIG_SQUASHFS_CHOICE_DECOMP_BY_MOUNT=y
 CONFIG_SQUASHFS_XATTR=y
 CONFIG_SQUASHFS_LZ4=y
 CONFIG_SQUASHFS_LZO=y
@@ -705,6 +706,7 @@ CONFIG_SECURITY_LOCKDOWN_LSM_EARLY=y
 CONFIG_SECURITY_LANDLOCK=y
 CONFIG_INTEGRITY_SIGNATURE=y
 CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y
+CONFIG_INTEGRITY_PLATFORM_KEYRING=y
 CONFIG_IMA=y
 CONFIG_IMA_DEFAULT_HASH_SHA256=y
 CONFIG_IMA_WRITE_POLICY=y
@@ -781,6 +783,7 @@ CONFIG_ZCRYPT=m
 CONFIG_PKEY=m
 CONFIG_CRYPTO_PAES_S390=m
 CONFIG_CRYPTO_DEV_VIRTIO=m
+CONFIG_SYSTEM_BLACKLIST_KEYRING=y
 CONFIG_CORDIC=m
 CONFIG_CRYPTO_LIB_CURVE25519=m
 CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m
@@ -848,7 +851,6 @@ CONFIG_PREEMPT_TRACER=y
 CONFIG_SCHED_TRACER=y
 CONFIG_FTRACE_SYSCALLS=y
 CONFIG_BLK_DEV_IO_TRACE=y
-CONFIG_BPF_KPROBE_OVERRIDE=y
 CONFIG_HIST_TRIGGERS=y
 CONFIG_FTRACE_STARTUP_TEST=y
 # CONFIG_EVENT_TRACE_STARTUP_TEST is not set
@@ -870,7 +872,6 @@ CONFIG_FAIL_MAKE_REQUEST=y
 CONFIG_FAIL_IO_TIMEOUT=y
 CONFIG_FAIL_FUTEX=y
 CONFIG_FAULT_INJECTION_DEBUG_FS=y
-CONFIG_FAIL_FUNCTION=y
 CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y
 CONFIG_LKDTM=m
 CONFIG_TEST_MIN_HEAP=y
index 2bc2d0fe57743ccd8caa5ea8838c1a8b555bfcc6..cec71268e3bc2e22916ad2c59c7a6601bb91284b 100644 (file)
@@ -181,7 +181,6 @@ CONFIG_NFT_CT=m
 CONFIG_NFT_LOG=m
 CONFIG_NFT_LIMIT=m
 CONFIG_NFT_NAT=m
-CONFIG_NFT_OBJREF=m
 CONFIG_NFT_REJECT=m
 CONFIG_NFT_COMPAT=m
 CONFIG_NFT_HASH=m
@@ -559,6 +558,7 @@ CONFIG_INPUT_EVDEV=y
 # CONFIG_INPUT_MOUSE is not set
 # CONFIG_SERIO is not set
 CONFIG_LEGACY_PTY_COUNT=0
+# CONFIG_LEGACY_TIOCSTI is not set
 CONFIG_VIRTIO_CONSOLE=m
 CONFIG_HW_RANDOM_VIRTIO=m
 CONFIG_HANGCHECK_TIMER=m
@@ -645,6 +645,7 @@ CONFIG_CONFIGFS_FS=m
 CONFIG_ECRYPT_FS=m
 CONFIG_CRAMFS=m
 CONFIG_SQUASHFS=m
+CONFIG_SQUASHFS_CHOICE_DECOMP_BY_MOUNT=y
 CONFIG_SQUASHFS_XATTR=y
 CONFIG_SQUASHFS_LZ4=y
 CONFIG_SQUASHFS_LZO=y
@@ -688,6 +689,7 @@ CONFIG_SECURITY_LOCKDOWN_LSM_EARLY=y
 CONFIG_SECURITY_LANDLOCK=y
 CONFIG_INTEGRITY_SIGNATURE=y
 CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y
+CONFIG_INTEGRITY_PLATFORM_KEYRING=y
 CONFIG_IMA=y
 CONFIG_IMA_DEFAULT_HASH_SHA256=y
 CONFIG_IMA_WRITE_POLICY=y
@@ -766,6 +768,7 @@ CONFIG_ZCRYPT=m
 CONFIG_PKEY=m
 CONFIG_CRYPTO_PAES_S390=m
 CONFIG_CRYPTO_DEV_VIRTIO=m
+CONFIG_SYSTEM_BLACKLIST_KEYRING=y
 CONFIG_CORDIC=m
 CONFIG_PRIME_NUMBERS=m
 CONFIG_CRYPTO_LIB_CURVE25519=m
@@ -798,7 +801,6 @@ CONFIG_STACK_TRACER=y
 CONFIG_SCHED_TRACER=y
 CONFIG_FTRACE_SYSCALLS=y
 CONFIG_BLK_DEV_IO_TRACE=y
-CONFIG_BPF_KPROBE_OVERRIDE=y
 CONFIG_HIST_TRIGGERS=y
 CONFIG_SAMPLES=y
 CONFIG_SAMPLE_TRACE_PRINTK=m
index ae14ab0b864d5946c277c2fabae7869223b3e87a..a9c0c81d1de992c8cfdadb7985e293856318681e 100644 (file)
@@ -13,7 +13,6 @@ CONFIG_TUNE_ZEC12=y
 # CONFIG_COMPAT is not set
 CONFIG_NR_CPUS=2
 CONFIG_HZ_100=y
-# CONFIG_RELOCATABLE is not set
 # CONFIG_CHSC_SCH is not set
 # CONFIG_SCM_BUS is not set
 CONFIG_CRASH_DUMP=y
@@ -50,6 +49,7 @@ CONFIG_ZFCP=y
 # CONFIG_INPUT_KEYBOARD is not set
 # CONFIG_INPUT_MOUSE is not set
 # CONFIG_SERIO is not set
+# CONFIG_LEGACY_TIOCSTI is not set
 # CONFIG_HVC_IUCV is not set
 # CONFIG_HW_RANDOM_S390 is not set
 # CONFIG_HMC_DRV is not set
index feaba12dbecb8d72d7e30128d3158c158f7b179d..efa103b52a1a190a19dd9d98bc98d81447274075 100644 (file)
@@ -131,19 +131,21 @@ struct hws_combined_entry {
        struct hws_diag_entry   diag;   /* Diagnostic-sampling data entry */
 } __packed;
 
-struct hws_trailer_entry {
-       union {
-               struct {
-                       unsigned int f:1;       /* 0 - Block Full Indicator   */
-                       unsigned int a:1;       /* 1 - Alert request control  */
-                       unsigned int t:1;       /* 2 - Timestamp format       */
-                       unsigned int :29;       /* 3 - 31: Reserved           */
-                       unsigned int bsdes:16;  /* 32-47: size of basic SDE   */
-                       unsigned int dsdes:16;  /* 48-63: size of diagnostic SDE */
-               };
-               unsigned long long flags;       /* 0 - 63: All indicators     */
+union hws_trailer_header {
+       struct {
+               unsigned int f:1;       /* 0 - Block Full Indicator   */
+               unsigned int a:1;       /* 1 - Alert request control  */
+               unsigned int t:1;       /* 2 - Timestamp format       */
+               unsigned int :29;       /* 3 - 31: Reserved           */
+               unsigned int bsdes:16;  /* 32-47: size of basic SDE   */
+               unsigned int dsdes:16;  /* 48-63: size of diagnostic SDE */
+               unsigned long long overflow; /* 64 - Overflow Count   */
        };
-       unsigned long long overflow;     /* 64 - sample Overflow count        */
+       __uint128_t val;
+};
+
+struct hws_trailer_entry {
+       union hws_trailer_header header; /* 0 - 15 Flags + Overflow Count     */
        unsigned char timestamp[16];     /* 16 - 31 timestamp                 */
        unsigned long long reserved1;    /* 32 -Reserved                      */
        unsigned long long reserved2;    /*                                   */
@@ -290,14 +292,11 @@ static inline unsigned long sample_rate_to_freq(struct hws_qsi_info_block *qsi,
        return USEC_PER_SEC * qsi->cpu_speed / rate;
 }
 
-#define SDB_TE_ALERT_REQ_MASK  0x4000000000000000UL
-#define SDB_TE_BUFFER_FULL_MASK 0x8000000000000000UL
-
 /* Return TOD timestamp contained in an trailer entry */
 static inline unsigned long long trailer_timestamp(struct hws_trailer_entry *te)
 {
        /* TOD in STCKE format */
-       if (te->t)
+       if (te->header.t)
                return *((unsigned long long *) &te->timestamp[1]);
 
        /* TOD in STCK format */
index 77f24262c25c18764f3c87f8e6a1b706db5714a1..ac665b9670c5dba3a62948459d187e80e5313afc 100644 (file)
@@ -4,8 +4,8 @@
  *
  *    Copyright IBM Corp. 1999, 2020
  */
-#ifndef DEBUG_H
-#define DEBUG_H
+#ifndef _ASM_S390_DEBUG_H
+#define _ASM_S390_DEBUG_H
 
 #include <linux/string.h>
 #include <linux/spinlock.h>
@@ -487,4 +487,4 @@ void debug_register_static(debug_info_t *id, int pages_per_area, int nr_areas);
 
 #endif /* MODULE */
 
-#endif /* DEBUG_H */
+#endif /* _ASM_S390_DEBUG_H */
index cb5fc06904354a2e1cb523e826f02c1d171f7f55..081837b391e353a4467b194719671e4bd435e3a9 100644 (file)
@@ -31,7 +31,7 @@
        pcp_op_T__ *ptr__;                                              \
        preempt_disable_notrace();                                      \
        ptr__ = raw_cpu_ptr(&(pcp));                                    \
-       prev__ = *ptr__;                                                \
+       prev__ = READ_ONCE(*ptr__);                                     \
        do {                                                            \
                old__ = prev__;                                         \
                new__ = old__ op (val);                                 \
index fc6d5f58debeb4c94338c2c4d73d27427c253299..2df94d32140c407962aec49f5d3e5d75e54fa58a 100644 (file)
@@ -187,8 +187,6 @@ static int kexec_file_add_ipl_report(struct kimage *image,
 
        data->memsz = ALIGN(data->memsz, PAGE_SIZE);
        buf.mem = data->memsz;
-       if (image->type == KEXEC_TYPE_CRASH)
-               buf.mem += crashk_res.start;
 
        ptr = (void *)ipl_cert_list_addr;
        end = ptr + ipl_cert_list_size;
@@ -225,6 +223,9 @@ static int kexec_file_add_ipl_report(struct kimage *image,
                data->kernel_buf + offsetof(struct lowcore, ipl_parmblock_ptr);
        *lc_ipl_parmblock_ptr = (__u32)buf.mem;
 
+       if (image->type == KEXEC_TYPE_CRASH)
+               buf.mem += crashk_res.start;
+
        ret = kexec_add_buffer(&buf);
 out:
        return ret;
index 332a4996513087ddede85bfa797be9408abad063..ce886a03545ae1d195962bd102992371f4da6874 100644 (file)
@@ -163,14 +163,15 @@ static void free_sampling_buffer(struct sf_buffer *sfb)
 
 static int alloc_sample_data_block(unsigned long *sdbt, gfp_t gfp_flags)
 {
-       unsigned long sdb, *trailer;
+       struct hws_trailer_entry *te;
+       unsigned long sdb;
 
        /* Allocate and initialize sample-data-block */
        sdb = get_zeroed_page(gfp_flags);
        if (!sdb)
                return -ENOMEM;
-       trailer = trailer_entry_ptr(sdb);
-       *trailer = SDB_TE_ALERT_REQ_MASK;
+       te = (struct hws_trailer_entry *)trailer_entry_ptr(sdb);
+       te->header.a = 1;
 
        /* Link SDB into the sample-data-block-table */
        *sdbt = sdb;
@@ -1206,7 +1207,7 @@ static void hw_collect_samples(struct perf_event *event, unsigned long *sdbt,
                                            "%s: Found unknown"
                                            " sampling data entry: te->f %i"
                                            " basic.def %#4x (%p)\n", __func__,
-                                           te->f, sample->def, sample);
+                                           te->header.f, sample->def, sample);
                        /* Sample slot is not yet written or other record.
                         *
                         * This condition can occur if the buffer was reused
@@ -1217,7 +1218,7 @@ static void hw_collect_samples(struct perf_event *event, unsigned long *sdbt,
                         * that are not full.  Stop processing if the first
                         * invalid format was detected.
                         */
-                       if (!te->f)
+                       if (!te->header.f)
                                break;
                }
 
@@ -1227,6 +1228,16 @@ static void hw_collect_samples(struct perf_event *event, unsigned long *sdbt,
        }
 }
 
+static inline __uint128_t __cdsg(__uint128_t *ptr, __uint128_t old, __uint128_t new)
+{
+       asm volatile(
+               "       cdsg    %[old],%[new],%[ptr]\n"
+               : [old] "+d" (old), [ptr] "+QS" (*ptr)
+               : [new] "d" (new)
+               : "memory", "cc");
+       return old;
+}
+
 /* hw_perf_event_update() - Process sampling buffer
  * @event:     The perf event
  * @flush_all: Flag to also flush partially filled sample-data-blocks
@@ -1243,10 +1254,11 @@ static void hw_collect_samples(struct perf_event *event, unsigned long *sdbt,
  */
 static void hw_perf_event_update(struct perf_event *event, int flush_all)
 {
+       unsigned long long event_overflow, sampl_overflow, num_sdb;
+       union hws_trailer_header old, prev, new;
        struct hw_perf_event *hwc = &event->hw;
        struct hws_trailer_entry *te;
        unsigned long *sdbt;
-       unsigned long long event_overflow, sampl_overflow, num_sdb, te_flags;
        int done;
 
        /*
@@ -1266,25 +1278,25 @@ static void hw_perf_event_update(struct perf_event *event, int flush_all)
                te = (struct hws_trailer_entry *) trailer_entry_ptr(*sdbt);
 
                /* Leave loop if no more work to do (block full indicator) */
-               if (!te->f) {
+               if (!te->header.f) {
                        done = 1;
                        if (!flush_all)
                                break;
                }
 
                /* Check the sample overflow count */
-               if (te->overflow)
+               if (te->header.overflow)
                        /* Account sample overflows and, if a particular limit
                         * is reached, extend the sampling buffer.
                         * For details, see sfb_account_overflows().
                         */
-                       sampl_overflow += te->overflow;
+                       sampl_overflow += te->header.overflow;
 
                /* Timestamps are valid for full sample-data-blocks only */
                debug_sprintf_event(sfdbg, 6, "%s: sdbt %#lx "
                                    "overflow %llu timestamp %#llx\n",
-                                   __func__, (unsigned long)sdbt, te->overflow,
-                                   (te->f) ? trailer_timestamp(te) : 0ULL);
+                                   __func__, (unsigned long)sdbt, te->header.overflow,
+                                   (te->header.f) ? trailer_timestamp(te) : 0ULL);
 
                /* Collect all samples from a single sample-data-block and
                 * flag if an (perf) event overflow happened.  If so, the PMU
@@ -1294,12 +1306,16 @@ static void hw_perf_event_update(struct perf_event *event, int flush_all)
                num_sdb++;
 
                /* Reset trailer (using compare-double-and-swap) */
+               /* READ_ONCE() 16 byte header */
+               prev.val = __cdsg(&te->header.val, 0, 0);
                do {
-                       te_flags = te->flags & ~SDB_TE_BUFFER_FULL_MASK;
-                       te_flags |= SDB_TE_ALERT_REQ_MASK;
-               } while (!cmpxchg_double(&te->flags, &te->overflow,
-                                        te->flags, te->overflow,
-                                        te_flags, 0ULL));
+                       old.val = prev.val;
+                       new.val = prev.val;
+                       new.f = 0;
+                       new.a = 1;
+                       new.overflow = 0;
+                       prev.val = __cdsg(&te->header.val, old.val, new.val);
+               } while (prev.val != old.val);
 
                /* Advance to next sample-data-block */
                sdbt++;
@@ -1384,7 +1400,7 @@ static void aux_output_end(struct perf_output_handle *handle)
        range_scan = AUX_SDB_NUM_ALERT(aux);
        for (i = 0, idx = aux->head; i < range_scan; i++, idx++) {
                te = aux_sdb_trailer(aux, idx);
-               if (!(te->flags & SDB_TE_BUFFER_FULL_MASK))
+               if (!te->header.f)
                        break;
        }
        /* i is num of SDBs which are full */
@@ -1392,7 +1408,7 @@ static void aux_output_end(struct perf_output_handle *handle)
 
        /* Remove alert indicators in the buffer */
        te = aux_sdb_trailer(aux, aux->alert_mark);
-       te->flags &= ~SDB_TE_ALERT_REQ_MASK;
+       te->header.a = 0;
 
        debug_sprintf_event(sfdbg, 6, "%s: SDBs %ld range %ld head %ld\n",
                            __func__, i, range_scan, aux->head);
@@ -1437,9 +1453,9 @@ static int aux_output_begin(struct perf_output_handle *handle,
                idx = aux->empty_mark + 1;
                for (i = 0; i < range_scan; i++, idx++) {
                        te = aux_sdb_trailer(aux, idx);
-                       te->flags &= ~(SDB_TE_BUFFER_FULL_MASK |
-                                      SDB_TE_ALERT_REQ_MASK);
-                       te->overflow = 0;
+                       te->header.f = 0;
+                       te->header.a = 0;
+                       te->header.overflow = 0;
                }
                /* Save the position of empty SDBs */
                aux->empty_mark = aux->head + range - 1;
@@ -1448,7 +1464,7 @@ static int aux_output_begin(struct perf_output_handle *handle,
        /* Set alert indicator */
        aux->alert_mark = aux->head + range/2 - 1;
        te = aux_sdb_trailer(aux, aux->alert_mark);
-       te->flags = te->flags | SDB_TE_ALERT_REQ_MASK;
+       te->header.a = 1;
 
        /* Reset hardware buffer head */
        head = AUX_SDB_INDEX(aux, aux->head);
@@ -1475,14 +1491,17 @@ static int aux_output_begin(struct perf_output_handle *handle,
 static bool aux_set_alert(struct aux_buffer *aux, unsigned long alert_index,
                          unsigned long long *overflow)
 {
-       unsigned long long orig_overflow, orig_flags, new_flags;
+       union hws_trailer_header old, prev, new;
        struct hws_trailer_entry *te;
 
        te = aux_sdb_trailer(aux, alert_index);
+       /* READ_ONCE() 16 byte header */
+       prev.val = __cdsg(&te->header.val, 0, 0);
        do {
-               orig_flags = te->flags;
-               *overflow = orig_overflow = te->overflow;
-               if (orig_flags & SDB_TE_BUFFER_FULL_MASK) {
+               old.val = prev.val;
+               new.val = prev.val;
+               *overflow = old.overflow;
+               if (old.f) {
                        /*
                         * SDB is already set by hardware.
                         * Abort and try to set somewhere
@@ -1490,10 +1509,10 @@ static bool aux_set_alert(struct aux_buffer *aux, unsigned long alert_index,
                         */
                        return false;
                }
-               new_flags = orig_flags | SDB_TE_ALERT_REQ_MASK;
-       } while (!cmpxchg_double(&te->flags, &te->overflow,
-                                orig_flags, orig_overflow,
-                                new_flags, 0ULL));
+               new.a = 1;
+               new.overflow = 0;
+               prev.val = __cdsg(&te->header.val, old.val, new.val);
+       } while (prev.val != old.val);
        return true;
 }
 
@@ -1522,8 +1541,9 @@ static bool aux_set_alert(struct aux_buffer *aux, unsigned long alert_index,
 static bool aux_reset_buffer(struct aux_buffer *aux, unsigned long range,
                             unsigned long long *overflow)
 {
-       unsigned long long orig_overflow, orig_flags, new_flags;
        unsigned long i, range_scan, idx, idx_old;
+       union hws_trailer_header old, prev, new;
+       unsigned long long orig_overflow;
        struct hws_trailer_entry *te;
 
        debug_sprintf_event(sfdbg, 6, "%s: range %ld head %ld alert %ld "
@@ -1554,17 +1574,20 @@ static bool aux_reset_buffer(struct aux_buffer *aux, unsigned long range,
        idx_old = idx = aux->empty_mark + 1;
        for (i = 0; i < range_scan; i++, idx++) {
                te = aux_sdb_trailer(aux, idx);
+               /* READ_ONCE() 16 byte header */
+               prev.val = __cdsg(&te->header.val, 0, 0);
                do {
-                       orig_flags = te->flags;
-                       orig_overflow = te->overflow;
-                       new_flags = orig_flags & ~SDB_TE_BUFFER_FULL_MASK;
+                       old.val = prev.val;
+                       new.val = prev.val;
+                       orig_overflow = old.overflow;
+                       new.f = 0;
+                       new.overflow = 0;
                        if (idx == aux->alert_mark)
-                               new_flags |= SDB_TE_ALERT_REQ_MASK;
+                               new.a = 1;
                        else
-                               new_flags &= ~SDB_TE_ALERT_REQ_MASK;
-               } while (!cmpxchg_double(&te->flags, &te->overflow,
-                                        orig_flags, orig_overflow,
-                                        new_flags, 0ULL));
+                               new.a = 0;
+                       prev.val = __cdsg(&te->header.val, old.val, new.val);
+               } while (prev.val != old.val);
                *overflow += orig_overflow;
        }
 
index 2b6091349daa2553c504a362ed1e7339aff8bd62..696c9e007a369d8df883317bdde15c1717a5bfef 100644 (file)
@@ -508,6 +508,7 @@ static void __init setup_lowcore_dat_on(void)
 {
        struct lowcore *abs_lc;
        unsigned long flags;
+       int i;
 
        __ctl_clear_bit(0, 28);
        S390_lowcore.external_new_psw.mask |= PSW_MASK_DAT;
@@ -523,8 +524,8 @@ static void __init setup_lowcore_dat_on(void)
        abs_lc = get_abs_lowcore(&flags);
        abs_lc->restart_flags = RESTART_FLAG_CTLREGS;
        abs_lc->program_new_psw = S390_lowcore.program_new_psw;
-       memcpy(abs_lc->cregs_save_area, S390_lowcore.cregs_save_area,
-              sizeof(abs_lc->cregs_save_area));
+       for (i = 0; i < 16; i++)
+               abs_lc->cregs_save_area[i] = S390_lowcore.cregs_save_area[i];
        put_abs_lowcore(abs_lc, flags);
 }
 
index 5ea3830af0ccff11c6a18a06a2392cf690742b34..cbf9c1b0beda4a8a82c5821fc97f23410d5ecd31 100644 (file)
@@ -17,6 +17,8 @@
 /* Handle ro_after_init data on our own. */
 #define RO_AFTER_INIT_DATA
 
+#define RUNTIME_DISCARD_EXIT
+
 #define EMITS_PT_NOTE
 
 #include <asm-generic/vmlinux.lds.h>
@@ -79,6 +81,7 @@ SECTIONS
                _end_amode31_refs = .;
        }
 
+       . = ALIGN(PAGE_SIZE);
        _edata = .;             /* End of data section */
 
        /* will be freed after init */
@@ -193,6 +196,7 @@ SECTIONS
 
        BSS_SECTION(PAGE_SIZE, 4 * PAGE_SIZE, PAGE_SIZE)
 
+       . = ALIGN(PAGE_SIZE);
        _end = . ;
 
        /*
index 1dae78deddf28602d32d051dc5ad956be6dd5315..ab26aa53ee3713bbc54bb4f4d08e9b6689c8de34 100644 (file)
@@ -83,8 +83,9 @@ static int sca_inject_ext_call(struct kvm_vcpu *vcpu, int src_id)
                struct esca_block *sca = vcpu->kvm->arch.sca;
                union esca_sigp_ctrl *sigp_ctrl =
                        &(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
-               union esca_sigp_ctrl new_val = {0}, old_val = *sigp_ctrl;
+               union esca_sigp_ctrl new_val = {0}, old_val;
 
+               old_val = READ_ONCE(*sigp_ctrl);
                new_val.scn = src_id;
                new_val.c = 1;
                old_val.c = 0;
@@ -95,8 +96,9 @@ static int sca_inject_ext_call(struct kvm_vcpu *vcpu, int src_id)
                struct bsca_block *sca = vcpu->kvm->arch.sca;
                union bsca_sigp_ctrl *sigp_ctrl =
                        &(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
-               union bsca_sigp_ctrl new_val = {0}, old_val = *sigp_ctrl;
+               union bsca_sigp_ctrl new_val = {0}, old_val;
 
+               old_val = READ_ONCE(*sigp_ctrl);
                new_val.scn = src_id;
                new_val.c = 1;
                old_val.c = 0;
@@ -126,16 +128,18 @@ static void sca_clear_ext_call(struct kvm_vcpu *vcpu)
                struct esca_block *sca = vcpu->kvm->arch.sca;
                union esca_sigp_ctrl *sigp_ctrl =
                        &(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
-               union esca_sigp_ctrl old = *sigp_ctrl;
+               union esca_sigp_ctrl old;
 
+               old = READ_ONCE(*sigp_ctrl);
                expect = old.value;
                rc = cmpxchg(&sigp_ctrl->value, old.value, 0);
        } else {
                struct bsca_block *sca = vcpu->kvm->arch.sca;
                union bsca_sigp_ctrl *sigp_ctrl =
                        &(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
-               union bsca_sigp_ctrl old = *sigp_ctrl;
+               union bsca_sigp_ctrl old;
 
+               old = READ_ONCE(*sigp_ctrl);
                expect = old.value;
                rc = cmpxchg(&sigp_ctrl->value, old.value, 0);
        }
index a889a3a938bab85510dc136d73386c96615bb32d..d1ce73f3bd85ef18ba1d14d017744f812b2fe3ce 100644 (file)
@@ -28,7 +28,7 @@
 #define pmd_ERROR(e) \
        printk("%s:%d: bad pmd %016llx.\n", __FILE__, __LINE__, pmd_val(e))
 
-typedef struct {
+typedef union {
        struct {
                unsigned long pmd_low;
                unsigned long pmd_high;
index 5521ea12f44e068281ef99c309a5f6ebf736ff71..aa9b9645758433b088eeb579a9553b693dfa381a 100644 (file)
@@ -32,7 +32,7 @@ intcall:
        movw    %dx, %si
        movw    %sp, %di
        movw    $11, %cx
-       rep; movsd
+       rep; movsl
 
        /* Pop full state from the stack */
        popal
@@ -67,7 +67,7 @@ intcall:
        jz      4f
        movw    %sp, %si
        movw    $11, %cx
-       rep; movsd
+       rep; movsl
 4:     addw    $44, %sp
 
        /* Restore state and return */
index cfd4c95b9f045e564ee48885aac4534421619b99..669d9e4f290152cc94b881e529a7239859491a67 100644 (file)
@@ -386,8 +386,8 @@ static int handle_mmio(struct pt_regs *regs, struct ve_info *ve)
 {
        unsigned long *reg, val, vaddr;
        char buffer[MAX_INSN_SIZE];
+       enum insn_mmio_type mmio;
        struct insn insn = {};
-       enum mmio_type mmio;
        int size, extend_size;
        u8 extend_val = 0;
 
@@ -402,10 +402,10 @@ static int handle_mmio(struct pt_regs *regs, struct ve_info *ve)
                return -EINVAL;
 
        mmio = insn_decode_mmio(&insn, &size);
-       if (WARN_ON_ONCE(mmio == MMIO_DECODE_FAILED))
+       if (WARN_ON_ONCE(mmio == INSN_MMIO_DECODE_FAILED))
                return -EINVAL;
 
-       if (mmio != MMIO_WRITE_IMM && mmio != MMIO_MOVS) {
+       if (mmio != INSN_MMIO_WRITE_IMM && mmio != INSN_MMIO_MOVS) {
                reg = insn_get_modrm_reg_ptr(&insn, regs);
                if (!reg)
                        return -EINVAL;
@@ -426,23 +426,23 @@ static int handle_mmio(struct pt_regs *regs, struct ve_info *ve)
 
        /* Handle writes first */
        switch (mmio) {
-       case MMIO_WRITE:
+       case INSN_MMIO_WRITE:
                memcpy(&val, reg, size);
                if (!mmio_write(size, ve->gpa, val))
                        return -EIO;
                return insn.length;
-       case MMIO_WRITE_IMM:
+       case INSN_MMIO_WRITE_IMM:
                val = insn.immediate.value;
                if (!mmio_write(size, ve->gpa, val))
                        return -EIO;
                return insn.length;
-       case MMIO_READ:
-       case MMIO_READ_ZERO_EXTEND:
-       case MMIO_READ_SIGN_EXTEND:
+       case INSN_MMIO_READ:
+       case INSN_MMIO_READ_ZERO_EXTEND:
+       case INSN_MMIO_READ_SIGN_EXTEND:
                /* Reads are handled below */
                break;
-       case MMIO_MOVS:
-       case MMIO_DECODE_FAILED:
+       case INSN_MMIO_MOVS:
+       case INSN_MMIO_DECODE_FAILED:
                /*
                 * MMIO was accessed with an instruction that could not be
                 * decoded or handled properly. It was likely not using io.h
@@ -459,15 +459,15 @@ static int handle_mmio(struct pt_regs *regs, struct ve_info *ve)
                return -EIO;
 
        switch (mmio) {
-       case MMIO_READ:
+       case INSN_MMIO_READ:
                /* Zero-extend for 32-bit operation */
                extend_size = size == 4 ? sizeof(*reg) : 0;
                break;
-       case MMIO_READ_ZERO_EXTEND:
+       case INSN_MMIO_READ_ZERO_EXTEND:
                /* Zero extend based on operand size */
                extend_size = insn.opnd_bytes;
                break;
-       case MMIO_READ_SIGN_EXTEND:
+       case INSN_MMIO_READ_SIGN_EXTEND:
                /* Sign extend based on operand size */
                extend_size = insn.opnd_bytes;
                if (size == 1 && val & BIT(7))
index d6f3703e41194aa11e2e32249f36e1369012b992..4386b10682ce4fd2ded0045eeb352c42fd056242 100644 (file)
@@ -1387,7 +1387,7 @@ static int __init amd_core_pmu_init(void)
                 * numbered counter following it.
                 */
                for (i = 0; i < x86_pmu.num_counters - 1; i += 2)
-                       even_ctr_mask |= 1 << i;
+                       even_ctr_mask |= BIT_ULL(i);
 
                pair_constraint = (struct event_constraint)
                                    __EVENT_CONSTRAINT(0, even_ctr_mask, 0,
index a2834bc93149a735295706e2599e5f4b5b667678..3019fb1926e358ad12d50ba2fe1392bbbf235069 100644 (file)
@@ -41,6 +41,7 @@
  *     MSR_CORE_C1_RES: CORE C1 Residency Counter
  *                      perf code: 0x00
  *                      Available model: SLM,AMT,GLM,CNL,ICX,TNT,ADL,RPL
+ *                                       MTL
  *                      Scope: Core (each processor core has a MSR)
  *     MSR_CORE_C3_RESIDENCY: CORE C3 Residency Counter
  *                            perf code: 0x01
  *                            perf code: 0x02
  *                            Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,
  *                                             SKL,KNL,GLM,CNL,KBL,CML,ICL,ICX,
- *                                             TGL,TNT,RKL,ADL,RPL,SPR
+ *                                             TGL,TNT,RKL,ADL,RPL,SPR,MTL
  *                            Scope: Core
  *     MSR_CORE_C7_RESIDENCY: CORE C7 Residency Counter
  *                            perf code: 0x03
  *                            Available model: SNB,IVB,HSW,BDW,SKL,CNL,KBL,CML,
- *                                             ICL,TGL,RKL,ADL,RPL
+ *                                             ICL,TGL,RKL,ADL,RPL,MTL
  *                            Scope: Core
  *     MSR_PKG_C2_RESIDENCY:  Package C2 Residency Counter.
  *                            perf code: 0x00
  *                            Available model: SNB,IVB,HSW,BDW,SKL,KNL,GLM,CNL,
  *                                             KBL,CML,ICL,ICX,TGL,TNT,RKL,ADL,
- *                                             RPL,SPR
+ *                                             RPL,SPR,MTL
  *                            Scope: Package (physical package)
  *     MSR_PKG_C3_RESIDENCY:  Package C3 Residency Counter.
  *                            perf code: 0x01
  *                            Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,KNL,
  *                                             GLM,CNL,KBL,CML,ICL,TGL,TNT,RKL,
- *                                             ADL,RPL
+ *                                             ADL,RPL,MTL
  *                            Scope: Package (physical package)
  *     MSR_PKG_C6_RESIDENCY:  Package C6 Residency Counter.
  *                            perf code: 0x02
  *                            Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,
  *                                             SKL,KNL,GLM,CNL,KBL,CML,ICL,ICX,
- *                                             TGL,TNT,RKL,ADL,RPL,SPR
+ *                                             TGL,TNT,RKL,ADL,RPL,SPR,MTL
  *                            Scope: Package (physical package)
  *     MSR_PKG_C7_RESIDENCY:  Package C7 Residency Counter.
  *                            perf code: 0x03
  *                            Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,CNL,
- *                                             KBL,CML,ICL,TGL,RKL,ADL,RPL
+ *                                             KBL,CML,ICL,TGL,RKL,ADL,RPL,MTL
  *                            Scope: Package (physical package)
  *     MSR_PKG_C8_RESIDENCY:  Package C8 Residency Counter.
  *                            perf code: 0x04
  *                            Available model: HSW ULT,KBL,CNL,CML,ICL,TGL,RKL,
- *                                             ADL,RPL
+ *                                             ADL,RPL,MTL
  *                            Scope: Package (physical package)
  *     MSR_PKG_C9_RESIDENCY:  Package C9 Residency Counter.
  *                            perf code: 0x05
  *                            Available model: HSW ULT,KBL,CNL,CML,ICL,TGL,RKL,
- *                                             ADL,RPL
+ *                                             ADL,RPL,MTL
  *                            Scope: Package (physical package)
  *     MSR_PKG_C10_RESIDENCY: Package C10 Residency Counter.
  *                            perf code: 0x06
  *                            Available model: HSW ULT,KBL,GLM,CNL,CML,ICL,TGL,
- *                                             TNT,RKL,ADL,RPL
+ *                                             TNT,RKL,ADL,RPL,MTL
  *                            Scope: Package (physical package)
  *
  */
@@ -686,6 +687,8 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = {
        X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE,          &adl_cstates),
        X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P,        &adl_cstates),
        X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_S,        &adl_cstates),
+       X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE,          &adl_cstates),
+       X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE_L,        &adl_cstates),
        { },
 };
 MODULE_DEVICE_TABLE(x86cpu, intel_cstates_match);
index 6f1ccc57a6921fd0ffd2d7f924f96bf0e2907a6c..459b1aafd4d4a3d36fcce623575ba0beaa9aca3f 100644 (file)
@@ -1833,6 +1833,7 @@ static const struct x86_cpu_id intel_uncore_match[] __initconst = {
        X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P,        &adl_uncore_init),
        X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_S,        &adl_uncore_init),
        X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X,    &spr_uncore_init),
+       X86_MATCH_INTEL_FAM6_MODEL(EMERALDRAPIDS_X,     &spr_uncore_init),
        X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D,      &snr_uncore_init),
        {},
 };
index ecced3a52668a129e3fbc38d38facaeff9c2ca61..c65d8906cbcf4de7e41f53a9a48d1625ca7cd4a2 100644 (file)
@@ -69,6 +69,7 @@ static bool test_intel(int idx, void *data)
        case INTEL_FAM6_BROADWELL_G:
        case INTEL_FAM6_BROADWELL_X:
        case INTEL_FAM6_SAPPHIRERAPIDS_X:
+       case INTEL_FAM6_EMERALDRAPIDS_X:
 
        case INTEL_FAM6_ATOM_SILVERMONT:
        case INTEL_FAM6_ATOM_SILVERMONT_D:
@@ -107,6 +108,8 @@ static bool test_intel(int idx, void *data)
        case INTEL_FAM6_RAPTORLAKE:
        case INTEL_FAM6_RAPTORLAKE_P:
        case INTEL_FAM6_RAPTORLAKE_S:
+       case INTEL_FAM6_METEORLAKE:
+       case INTEL_FAM6_METEORLAKE_L:
                if (idx == PERF_MSR_SMI || idx == PERF_MSR_PPERF)
                        return true;
                break;
index a829492bca4c193ab3ab30b85bd1fbb4e73b2b26..52e6e7ed4f78a655414ce1b7548c8f0b7d5d8822 100644 (file)
@@ -800,13 +800,18 @@ static const struct x86_cpu_id rapl_model_match[] __initconst = {
        X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X,           &model_hsx),
        X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE_L,         &model_skl),
        X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE,           &model_skl),
+       X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L,         &model_skl),
+       X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE,           &model_skl),
        X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE,           &model_skl),
        X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L,         &model_skl),
        X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_N,         &model_skl),
        X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X,    &model_spr),
+       X86_MATCH_INTEL_FAM6_MODEL(EMERALDRAPIDS_X,     &model_spr),
        X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE,          &model_skl),
        X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P,        &model_skl),
        X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_S,        &model_skl),
+       X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE,          &model_skl),
+       X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE_L,        &model_skl),
        {},
 };
 MODULE_DEVICE_TABLE(x86cpu, rapl_model_match);
index f07faa61c7f3118e581a6f88114623f78a7dbde1..54368a43abf67e455dbea4ca800e51e3196759dc 100644 (file)
@@ -32,16 +32,16 @@ int insn_fetch_from_user_inatomic(struct pt_regs *regs,
 bool insn_decode_from_regs(struct insn *insn, struct pt_regs *regs,
                           unsigned char buf[MAX_INSN_SIZE], int buf_size);
 
-enum mmio_type {
-       MMIO_DECODE_FAILED,
-       MMIO_WRITE,
-       MMIO_WRITE_IMM,
-       MMIO_READ,
-       MMIO_READ_ZERO_EXTEND,
-       MMIO_READ_SIGN_EXTEND,
-       MMIO_MOVS,
+enum insn_mmio_type {
+       INSN_MMIO_DECODE_FAILED,
+       INSN_MMIO_WRITE,
+       INSN_MMIO_WRITE_IMM,
+       INSN_MMIO_READ,
+       INSN_MMIO_READ_ZERO_EXTEND,
+       INSN_MMIO_READ_SIGN_EXTEND,
+       INSN_MMIO_MOVS,
 };
 
-enum mmio_type insn_decode_mmio(struct insn *insn, int *bytes);
+enum insn_mmio_type insn_decode_mmio(struct insn *insn, int *bytes);
 
 #endif /* _ASM_X86_INSN_EVAL_H */
index f35f1ff4427bb85273abab37fe40a9154bf11cd9..6aaae18f1854474d7811d0d41081c39fba27d3d8 100644 (file)
@@ -1111,6 +1111,7 @@ struct msr_bitmap_range {
 
 /* Xen emulation context */
 struct kvm_xen {
+       struct mutex xen_lock;
        u32 xen_version;
        bool long_mode;
        bool runstate_update_flag;
index 7d2c75ec9a8cd9d145085f4b9007a8ce4136ede9..ffea98f9064be6526d7d122c7d60322a44475ada 100644 (file)
@@ -119,7 +119,7 @@ static bool is_coretext(const struct core_text *ct, void *addr)
        return within_module_coretext(addr);
 }
 
-static __init_or_module bool skip_addr(void *dest)
+static bool skip_addr(void *dest)
 {
        if (dest == error_entry)
                return true;
@@ -181,7 +181,7 @@ static const u8 nops[] = {
        0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90,
 };
 
-static __init_or_module void *patch_dest(void *dest, bool direct)
+static void *patch_dest(void *dest, bool direct)
 {
        unsigned int tsize = SKL_TMPL_SIZE;
        u8 *pad = dest - tsize;
index d970ddb0cc65bedf358c32321d202036cf792923..bca0bd8f4846413831da02ec7eee8ae9c12abd12 100644 (file)
@@ -1981,6 +1981,8 @@ static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
                if (ctrl == PR_SPEC_FORCE_DISABLE)
                        task_set_spec_ib_force_disable(task);
                task_update_spec_tif(task);
+               if (task == current)
+                       indirect_branch_prediction_barrier();
                break;
        default:
                return -ERANGE;
index efe0c30d3a12d5f126a129d81bcac921dda97252..77538abeb72afe7b99736efbe552d8293bdd8b60 100644 (file)
@@ -146,6 +146,30 @@ static inline struct rmid_entry *__rmid_entry(u32 rmid)
        return entry;
 }
 
+static int __rmid_read(u32 rmid, enum resctrl_event_id eventid, u64 *val)
+{
+       u64 msr_val;
+
+       /*
+        * As per the SDM, when IA32_QM_EVTSEL.EvtID (bits 7:0) is configured
+        * with a valid event code for supported resource type and the bits
+        * IA32_QM_EVTSEL.RMID (bits 41:32) are configured with valid RMID,
+        * IA32_QM_CTR.data (bits 61:0) reports the monitored data.
+        * IA32_QM_CTR.Error (bit 63) and IA32_QM_CTR.Unavailable (bit 62)
+        * are error bits.
+        */
+       wrmsr(MSR_IA32_QM_EVTSEL, eventid, rmid);
+       rdmsrl(MSR_IA32_QM_CTR, msr_val);
+
+       if (msr_val & RMID_VAL_ERROR)
+               return -EIO;
+       if (msr_val & RMID_VAL_UNAVAIL)
+               return -EINVAL;
+
+       *val = msr_val;
+       return 0;
+}
+
 static struct arch_mbm_state *get_arch_mbm_state(struct rdt_hw_domain *hw_dom,
                                                 u32 rmid,
                                                 enum resctrl_event_id eventid)
@@ -172,8 +196,12 @@ void resctrl_arch_reset_rmid(struct rdt_resource *r, struct rdt_domain *d,
        struct arch_mbm_state *am;
 
        am = get_arch_mbm_state(hw_dom, rmid, eventid);
-       if (am)
+       if (am) {
                memset(am, 0, sizeof(*am));
+
+               /* Record any initial, non-zero count value. */
+               __rmid_read(rmid, eventid, &am->prev_msr);
+       }
 }
 
 static u64 mbm_overflow_count(u64 prev_msr, u64 cur_msr, unsigned int width)
@@ -191,25 +219,14 @@ int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_domain *d,
        struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
        struct arch_mbm_state *am;
        u64 msr_val, chunks;
+       int ret;
 
        if (!cpumask_test_cpu(smp_processor_id(), &d->cpu_mask))
                return -EINVAL;
 
-       /*
-        * As per the SDM, when IA32_QM_EVTSEL.EvtID (bits 7:0) is configured
-        * with a valid event code for supported resource type and the bits
-        * IA32_QM_EVTSEL.RMID (bits 41:32) are configured with valid RMID,
-        * IA32_QM_CTR.data (bits 61:0) reports the monitored data.
-        * IA32_QM_CTR.Error (bit 63) and IA32_QM_CTR.Unavailable (bit 62)
-        * are error bits.
-        */
-       wrmsr(MSR_IA32_QM_EVTSEL, eventid, rmid);
-       rdmsrl(MSR_IA32_QM_CTR, msr_val);
-
-       if (msr_val & RMID_VAL_ERROR)
-               return -EIO;
-       if (msr_val & RMID_VAL_UNAVAIL)
-               return -EINVAL;
+       ret = __rmid_read(rmid, eventid, &msr_val);
+       if (ret)
+               return ret;
 
        am = get_arch_mbm_state(hw_dom, rmid, eventid);
        if (am) {
index e5a48f05e7876adcfb942e48f930603c2375fd19..5993da21d82255737638c6684c74a83836463dd1 100644 (file)
@@ -580,8 +580,10 @@ static int __rdtgroup_move_task(struct task_struct *tsk,
        /*
         * Ensure the task's closid and rmid are written before determining if
         * the task is current that will decide if it will be interrupted.
+        * This pairs with the full barrier between the rq->curr update and
+        * resctrl_sched_in() during context switch.
         */
-       barrier();
+       smp_mb();
 
        /*
         * By now, the task's closid and rmid are set. If the task is current
@@ -2401,6 +2403,14 @@ static void rdt_move_group_tasks(struct rdtgroup *from, struct rdtgroup *to,
                        WRITE_ONCE(t->closid, to->closid);
                        WRITE_ONCE(t->rmid, to->mon.rmid);
 
+                       /*
+                        * Order the closid/rmid stores above before the loads
+                        * in task_curr(). This pairs with the full barrier
+                        * between the rq->curr update and resctrl_sched_in()
+                        * during context switch.
+                        */
+                       smp_mb();
+
                        /*
                         * If the task is on a CPU, set the CPU in the mask.
                         * The detection is inaccurate as tasks might move or
index 9730c88530fc8b4c0a6c0bfc770acd087524ea10..305514431f26e03d9b50709cdaa4a2732f74e5c3 100644 (file)
@@ -401,10 +401,8 @@ int crash_load_segments(struct kimage *image)
        kbuf.buf_align = ELF_CORE_HEADER_ALIGN;
        kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
        ret = kexec_add_buffer(&kbuf);
-       if (ret) {
-               vfree((void *)image->elf_headers);
+       if (ret)
                return ret;
-       }
        image->elf_load_addr = kbuf.mem;
        pr_debug("Loaded ELF headers at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
                 image->elf_load_addr, kbuf.bufsz, kbuf.memsz);
index 66299682b6b7e24305160f6282ff6c40f07f82d4..b36f3c367cb24c5e8fcec80271098d1ead7e727e 100644 (file)
@@ -37,6 +37,7 @@
 #include <linux/extable.h>
 #include <linux/kdebug.h>
 #include <linux/kallsyms.h>
+#include <linux/kgdb.h>
 #include <linux/ftrace.h>
 #include <linux/kasan.h>
 #include <linux/moduleloader.h>
@@ -281,12 +282,15 @@ static int can_probe(unsigned long paddr)
                if (ret < 0)
                        return 0;
 
+#ifdef CONFIG_KGDB
                /*
-                * Another debugging subsystem might insert this breakpoint.
-                * In that case, we can't recover it.
+                * If there is a dynamically installed kgdb sw breakpoint,
+                * this function should not be probed.
                 */
-               if (insn.opcode.bytes[0] == INT3_INSN_OPCODE)
+               if (insn.opcode.bytes[0] == INT3_INSN_OPCODE &&
+                   kgdb_has_hit_break(addr))
                        return 0;
+#endif
                addr += insn.length;
        }
 
index e6b8c5362b945586bc08f1c4817aeba512f9cc34..e57e07b0edb64cf5b4d82d64adfc61de94c60424 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/extable.h>
 #include <linux/kdebug.h>
 #include <linux/kallsyms.h>
+#include <linux/kgdb.h>
 #include <linux/ftrace.h>
 #include <linux/objtool.h>
 #include <linux/pgtable.h>
@@ -279,19 +280,6 @@ static int insn_is_indirect_jump(struct insn *insn)
        return ret;
 }
 
-static bool is_padding_int3(unsigned long addr, unsigned long eaddr)
-{
-       unsigned char ops;
-
-       for (; addr < eaddr; addr++) {
-               if (get_kernel_nofault(ops, (void *)addr) < 0 ||
-                   ops != INT3_INSN_OPCODE)
-                       return false;
-       }
-
-       return true;
-}
-
 /* Decode whole function to ensure any instructions don't jump into target */
 static int can_optimize(unsigned long paddr)
 {
@@ -334,15 +322,15 @@ static int can_optimize(unsigned long paddr)
                ret = insn_decode_kernel(&insn, (void *)recovered_insn);
                if (ret < 0)
                        return 0;
-
+#ifdef CONFIG_KGDB
                /*
-                * In the case of detecting unknown breakpoint, this could be
-                * a padding INT3 between functions. Let's check that all the
-                * rest of the bytes are also INT3.
+                * If there is a dynamically installed kgdb sw breakpoint,
+                * this function should not be probed.
                 */
-               if (insn.opcode.bytes[0] == INT3_INSN_OPCODE)
-                       return is_padding_int3(addr, paddr - offset + size) ? 1 : 0;
-
+               if (insn.opcode.bytes[0] == INT3_INSN_OPCODE &&
+                   kgdb_has_hit_break(addr))
+                       return 0;
+#endif
                /* Recover address */
                insn.kaddr = (void *)addr;
                insn.next_byte = (void *)(addr + insn.length);
index a428c62330d371a6742ada0a2e9e578dd3f1def6..679026a640efd8e2e87a03d3b5e57cf18de1a6dd 100644 (file)
@@ -1536,32 +1536,32 @@ static enum es_result vc_handle_mmio_movs(struct es_em_ctxt *ctxt,
 static enum es_result vc_handle_mmio(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
 {
        struct insn *insn = &ctxt->insn;
+       enum insn_mmio_type mmio;
        unsigned int bytes = 0;
-       enum mmio_type mmio;
        enum es_result ret;
        u8 sign_byte;
        long *reg_data;
 
        mmio = insn_decode_mmio(insn, &bytes);
-       if (mmio == MMIO_DECODE_FAILED)
+       if (mmio == INSN_MMIO_DECODE_FAILED)
                return ES_DECODE_FAILED;
 
-       if (mmio != MMIO_WRITE_IMM && mmio != MMIO_MOVS) {
+       if (mmio != INSN_MMIO_WRITE_IMM && mmio != INSN_MMIO_MOVS) {
                reg_data = insn_get_modrm_reg_ptr(insn, ctxt->regs);
                if (!reg_data)
                        return ES_DECODE_FAILED;
        }
 
        switch (mmio) {
-       case MMIO_WRITE:
+       case INSN_MMIO_WRITE:
                memcpy(ghcb->shared_buffer, reg_data, bytes);
                ret = vc_do_mmio(ghcb, ctxt, bytes, false);
                break;
-       case MMIO_WRITE_IMM:
+       case INSN_MMIO_WRITE_IMM:
                memcpy(ghcb->shared_buffer, insn->immediate1.bytes, bytes);
                ret = vc_do_mmio(ghcb, ctxt, bytes, false);
                break;
-       case MMIO_READ:
+       case INSN_MMIO_READ:
                ret = vc_do_mmio(ghcb, ctxt, bytes, true);
                if (ret)
                        break;
@@ -1572,7 +1572,7 @@ static enum es_result vc_handle_mmio(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
 
                memcpy(reg_data, ghcb->shared_buffer, bytes);
                break;
-       case MMIO_READ_ZERO_EXTEND:
+       case INSN_MMIO_READ_ZERO_EXTEND:
                ret = vc_do_mmio(ghcb, ctxt, bytes, true);
                if (ret)
                        break;
@@ -1581,7 +1581,7 @@ static enum es_result vc_handle_mmio(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
                memset(reg_data, 0, insn->opnd_bytes);
                memcpy(reg_data, ghcb->shared_buffer, bytes);
                break;
-       case MMIO_READ_SIGN_EXTEND:
+       case INSN_MMIO_READ_SIGN_EXTEND:
                ret = vc_do_mmio(ghcb, ctxt, bytes, true);
                if (ret)
                        break;
@@ -1600,7 +1600,7 @@ static enum es_result vc_handle_mmio(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
                memset(reg_data, sign_byte, insn->opnd_bytes);
                memcpy(reg_data, ghcb->shared_buffer, bytes);
                break;
-       case MMIO_MOVS:
+       case INSN_MMIO_MOVS:
                ret = vc_handle_mmio_movs(ctxt, bytes);
                break;
        default:
index b14653b61470c2720267006f7e7166ebac159613..596061c1610e671885705baa3682af326bc93294 100644 (file)
@@ -770,16 +770,22 @@ struct kvm_cpuid_array {
        int nent;
 };
 
+static struct kvm_cpuid_entry2 *get_next_cpuid(struct kvm_cpuid_array *array)
+{
+       if (array->nent >= array->maxnent)
+               return NULL;
+
+       return &array->entries[array->nent++];
+}
+
 static struct kvm_cpuid_entry2 *do_host_cpuid(struct kvm_cpuid_array *array,
                                              u32 function, u32 index)
 {
-       struct kvm_cpuid_entry2 *entry;
+       struct kvm_cpuid_entry2 *entry = get_next_cpuid(array);
 
-       if (array->nent >= array->maxnent)
+       if (!entry)
                return NULL;
 
-       entry = &array->entries[array->nent++];
-
        memset(entry, 0, sizeof(*entry));
        entry->function = function;
        entry->index = index;
@@ -956,22 +962,13 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
                entry->edx = edx.full;
                break;
        }
-       /*
-        * Per Intel's SDM, the 0x1f is a superset of 0xb,
-        * thus they can be handled by common code.
-        */
        case 0x1f:
        case 0xb:
                /*
-                * Populate entries until the level type (ECX[15:8]) of the
-                * previous entry is zero.  Note, CPUID EAX.{0x1f,0xb}.0 is
-                * the starting entry, filled by the primary do_host_cpuid().
+                * No topology; a valid topology is indicated by the presence
+                * of subleaf 1.
                 */
-               for (i = 1; entry->ecx & 0xff00; ++i) {
-                       entry = do_host_cpuid(array, function, i);
-                       if (!entry)
-                               goto out;
-               }
+               entry->eax = entry->ebx = entry->ecx = 0;
                break;
        case 0xd: {
                u64 permitted_xcr0 = kvm_caps.supported_xcr0 & xstate_get_guest_group_perm();
@@ -1202,6 +1199,9 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
                entry->ebx = entry->ecx = entry->edx = 0;
                break;
        case 0x8000001e:
+               /* Do not return host topology information.  */
+               entry->eax = entry->ebx = entry->ecx = 0;
+               entry->edx = 0; /* reserved */
                break;
        case 0x8000001F:
                if (!kvm_cpu_cap_has(X86_FEATURE_SEV)) {
index 2c7f2a26421e825097982d800a03d09435e9917a..e8296942a86821c9ccd3e47c258eec390197f56e 100644 (file)
@@ -1769,6 +1769,7 @@ static bool hv_is_vp_in_sparse_set(u32 vp_id, u64 valid_bank_mask, u64 sparse_ba
 }
 
 struct kvm_hv_hcall {
+       /* Hypercall input data */
        u64 param;
        u64 ingpa;
        u64 outgpa;
@@ -1779,12 +1780,21 @@ struct kvm_hv_hcall {
        bool fast;
        bool rep;
        sse128_t xmm[HV_HYPERCALL_MAX_XMM_REGISTERS];
+
+       /*
+        * Current read offset when KVM reads hypercall input data gradually,
+        * either offset in bytes from 'ingpa' for regular hypercalls or the
+        * number of already consumed 'XMM halves' for 'fast' hypercalls.
+        */
+       union {
+               gpa_t data_offset;
+               int consumed_xmm_halves;
+       };
 };
 
 
 static int kvm_hv_get_hc_data(struct kvm *kvm, struct kvm_hv_hcall *hc,
-                             u16 orig_cnt, u16 cnt_cap, u64 *data,
-                             int consumed_xmm_halves, gpa_t offset)
+                             u16 orig_cnt, u16 cnt_cap, u64 *data)
 {
        /*
         * Preserve the original count when ignoring entries via a "cap", KVM
@@ -1799,11 +1809,11 @@ static int kvm_hv_get_hc_data(struct kvm *kvm, struct kvm_hv_hcall *hc,
                 * Each XMM holds two sparse banks, but do not count halves that
                 * have already been consumed for hypercall parameters.
                 */
-               if (orig_cnt > 2 * HV_HYPERCALL_MAX_XMM_REGISTERS - consumed_xmm_halves)
+               if (orig_cnt > 2 * HV_HYPERCALL_MAX_XMM_REGISTERS - hc->consumed_xmm_halves)
                        return HV_STATUS_INVALID_HYPERCALL_INPUT;
 
                for (i = 0; i < cnt; i++) {
-                       j = i + consumed_xmm_halves;
+                       j = i + hc->consumed_xmm_halves;
                        if (j % 2)
                                data[i] = sse128_hi(hc->xmm[j / 2]);
                        else
@@ -1812,27 +1822,24 @@ static int kvm_hv_get_hc_data(struct kvm *kvm, struct kvm_hv_hcall *hc,
                return 0;
        }
 
-       return kvm_read_guest(kvm, hc->ingpa + offset, data,
+       return kvm_read_guest(kvm, hc->ingpa + hc->data_offset, data,
                              cnt * sizeof(*data));
 }
 
 static u64 kvm_get_sparse_vp_set(struct kvm *kvm, struct kvm_hv_hcall *hc,
-                                u64 *sparse_banks, int consumed_xmm_halves,
-                                gpa_t offset)
+                                u64 *sparse_banks)
 {
        if (hc->var_cnt > HV_MAX_SPARSE_VCPU_BANKS)
                return -EINVAL;
 
        /* Cap var_cnt to ignore banks that cannot contain a legal VP index. */
        return kvm_hv_get_hc_data(kvm, hc, hc->var_cnt, KVM_HV_MAX_SPARSE_VCPU_SET_BITS,
-                                 sparse_banks, consumed_xmm_halves, offset);
+                                 sparse_banks);
 }
 
-static int kvm_hv_get_tlb_flush_entries(struct kvm *kvm, struct kvm_hv_hcall *hc, u64 entries[],
-                                       int consumed_xmm_halves, gpa_t offset)
+static int kvm_hv_get_tlb_flush_entries(struct kvm *kvm, struct kvm_hv_hcall *hc, u64 entries[])
 {
-       return kvm_hv_get_hc_data(kvm, hc, hc->rep_cnt, hc->rep_cnt,
-                                 entries, consumed_xmm_halves, offset);
+       return kvm_hv_get_hc_data(kvm, hc, hc->rep_cnt, hc->rep_cnt, entries);
 }
 
 static void hv_tlb_flush_enqueue(struct kvm_vcpu *vcpu,
@@ -1926,8 +1933,6 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
        struct kvm_vcpu *v;
        unsigned long i;
        bool all_cpus;
-       int consumed_xmm_halves = 0;
-       gpa_t data_offset;
 
        /*
         * The Hyper-V TLFS doesn't allow more than HV_MAX_SPARSE_VCPU_BANKS
@@ -1955,12 +1960,12 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
                        flush.address_space = hc->ingpa;
                        flush.flags = hc->outgpa;
                        flush.processor_mask = sse128_lo(hc->xmm[0]);
-                       consumed_xmm_halves = 1;
+                       hc->consumed_xmm_halves = 1;
                } else {
                        if (unlikely(kvm_read_guest(kvm, hc->ingpa,
                                                    &flush, sizeof(flush))))
                                return HV_STATUS_INVALID_HYPERCALL_INPUT;
-                       data_offset = sizeof(flush);
+                       hc->data_offset = sizeof(flush);
                }
 
                trace_kvm_hv_flush_tlb(flush.processor_mask,
@@ -1985,12 +1990,12 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
                        flush_ex.flags = hc->outgpa;
                        memcpy(&flush_ex.hv_vp_set,
                               &hc->xmm[0], sizeof(hc->xmm[0]));
-                       consumed_xmm_halves = 2;
+                       hc->consumed_xmm_halves = 2;
                } else {
                        if (unlikely(kvm_read_guest(kvm, hc->ingpa, &flush_ex,
                                                    sizeof(flush_ex))))
                                return HV_STATUS_INVALID_HYPERCALL_INPUT;
-                       data_offset = sizeof(flush_ex);
+                       hc->data_offset = sizeof(flush_ex);
                }
 
                trace_kvm_hv_flush_tlb_ex(flush_ex.hv_vp_set.valid_bank_mask,
@@ -2009,8 +2014,7 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
                        if (!hc->var_cnt)
                                goto ret_success;
 
-                       if (kvm_get_sparse_vp_set(kvm, hc, sparse_banks,
-                                                 consumed_xmm_halves, data_offset))
+                       if (kvm_get_sparse_vp_set(kvm, hc, sparse_banks))
                                return HV_STATUS_INVALID_HYPERCALL_INPUT;
                }
 
@@ -2021,8 +2025,10 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
                 * consumed_xmm_halves to make sure TLB flush entries are read
                 * from the correct offset.
                 */
-               data_offset += hc->var_cnt * sizeof(sparse_banks[0]);
-               consumed_xmm_halves += hc->var_cnt;
+               if (hc->fast)
+                       hc->consumed_xmm_halves += hc->var_cnt;
+               else
+                       hc->data_offset += hc->var_cnt * sizeof(sparse_banks[0]);
        }
 
        if (hc->code == HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE ||
@@ -2030,8 +2036,7 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
            hc->rep_cnt > ARRAY_SIZE(__tlb_flush_entries)) {
                tlb_flush_entries = NULL;
        } else {
-               if (kvm_hv_get_tlb_flush_entries(kvm, hc, __tlb_flush_entries,
-                                               consumed_xmm_halves, data_offset))
+               if (kvm_hv_get_tlb_flush_entries(kvm, hc, __tlb_flush_entries))
                        return HV_STATUS_INVALID_HYPERCALL_INPUT;
                tlb_flush_entries = __tlb_flush_entries;
        }
@@ -2180,9 +2185,13 @@ static u64 kvm_hv_send_ipi(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
                if (!hc->var_cnt)
                        goto ret_success;
 
-               if (kvm_get_sparse_vp_set(kvm, hc, sparse_banks, 1,
-                                         offsetof(struct hv_send_ipi_ex,
-                                                  vp_set.bank_contents)))
+               if (!hc->fast)
+                       hc->data_offset = offsetof(struct hv_send_ipi_ex,
+                                                  vp_set.bank_contents);
+               else
+                       hc->consumed_xmm_halves = 1;
+
+               if (kvm_get_sparse_vp_set(kvm, hc, sparse_banks))
                        return HV_STATUS_INVALID_HYPERCALL_INPUT;
        }
 
index 0687162c4f227d8ee9bfd45c70d2d5f7e3c70535..3742d9adacfc1a7b01b2b0f5431953ba16fb43ca 100644 (file)
@@ -426,8 +426,9 @@ void kvm_scan_ioapic_routes(struct kvm_vcpu *vcpu,
                        kvm_set_msi_irq(vcpu->kvm, entry, &irq);
 
                        if (irq.trig_mode &&
-                           kvm_apic_match_dest(vcpu, NULL, APIC_DEST_NOSHORT,
-                                               irq.dest_id, irq.dest_mode))
+                           (kvm_apic_match_dest(vcpu, NULL, APIC_DEST_NOSHORT,
+                                                irq.dest_id, irq.dest_mode) ||
+                            kvm_apic_pending_eoi(vcpu, irq.vector)))
                                __set_bit(irq.vector, ioapic_handled_vectors);
                }
        }
index 28e3769066e215332197df09381ec67252a67a5d..58c3242fcc7ad9138af4c9654175cc9d682e301c 100644 (file)
@@ -188,11 +188,11 @@ static inline bool lapic_in_kernel(struct kvm_vcpu *vcpu)
 
 extern struct static_key_false_deferred apic_hw_disabled;
 
-static inline int kvm_apic_hw_enabled(struct kvm_lapic *apic)
+static inline bool kvm_apic_hw_enabled(struct kvm_lapic *apic)
 {
        if (static_branch_unlikely(&apic_hw_disabled.key))
                return apic->vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE;
-       return MSR_IA32_APICBASE_ENABLE;
+       return true;
 }
 
 extern struct static_key_false_deferred apic_sw_disabled;
index 1f03701b943a1cac91fe7dac6a5f3514c5b9d6dc..6f54dc9409c943f7d148400bbb498185e061cc3e 100644 (file)
@@ -363,7 +363,7 @@ static __always_inline bool is_rsvd_spte(struct rsvd_bits_validate *rsvd_check,
  * A shadow-present leaf SPTE may be non-writable for 4 possible reasons:
  *
  *  1. To intercept writes for dirty logging. KVM write-protects huge pages
- *     so that they can be split be split down into the dirty logging
+ *     so that they can be split down into the dirty logging
  *     granularity (4KiB) whenever the guest writes to them. KVM also
  *     write-protects 4KiB pages so that writes can be recorded in the dirty log
  *     (e.g. if not using PML). SPTEs are write-protected for dirty logging
index 771210ce518112f5d8446d9204e4d0eeec9ee241..d6df38d371a00c6992a136cc07cd1469eaad1804 100644 (file)
@@ -1074,7 +1074,9 @@ static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu,
        int ret = RET_PF_FIXED;
        bool wrprot = false;
 
-       WARN_ON(sp->role.level != fault->goal_level);
+       if (WARN_ON_ONCE(sp->role.level != fault->goal_level))
+               return RET_PF_RETRY;
+
        if (unlikely(!fault->slot))
                new_spte = make_mmio_spte(vcpu, iter->gfn, ACC_ALL);
        else
@@ -1173,9 +1175,6 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
                if (fault->nx_huge_page_workaround_enabled)
                        disallowed_hugepage_adjust(fault, iter.old_spte, iter.level);
 
-               if (iter.level == fault->goal_level)
-                       break;
-
                /*
                 * If SPTE has been frozen by another thread, just give up and
                 * retry, avoiding unnecessary page table allocation and free.
@@ -1183,6 +1182,9 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
                if (is_removed_spte(iter.old_spte))
                        goto retry;
 
+               if (iter.level == fault->goal_level)
+                       goto map_target_level;
+
                /* Step down into the lower level page table if it exists. */
                if (is_shadow_present_pte(iter.old_spte) &&
                    !is_large_pte(iter.old_spte))
@@ -1203,8 +1205,8 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
                        r = tdp_mmu_link_sp(kvm, &iter, sp, true);
 
                /*
-                * Also force the guest to retry the access if the upper level SPTEs
-                * aren't in place.
+                * Force the guest to retry if installing an upper level SPTE
+                * failed, e.g. because a different task modified the SPTE.
                 */
                if (r) {
                        tdp_mmu_free_sp(sp);
@@ -1214,11 +1216,20 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
                if (fault->huge_page_disallowed &&
                    fault->req_level >= iter.level) {
                        spin_lock(&kvm->arch.tdp_mmu_pages_lock);
-                       track_possible_nx_huge_page(kvm, sp);
+                       if (sp->nx_huge_page_disallowed)
+                               track_possible_nx_huge_page(kvm, sp);
                        spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
                }
        }
 
+       /*
+        * The walk aborted before reaching the target level, e.g. because the
+        * iterator detected an upper level SPTE was frozen during traversal.
+        */
+       WARN_ON_ONCE(iter.level == fault->goal_level);
+       goto retry;
+
+map_target_level:
        ret = tdp_mmu_map_handle_target_level(vcpu, fault, &iter);
 
 retry:
index 684393c2210523d917675f6709fa7ab1dd5fd8ed..eb594620dd75a1f7fe46190d782f80865a6c4dde 100644 (file)
@@ -238,7 +238,8 @@ static bool pmc_resume_counter(struct kvm_pmc *pmc)
                return false;
 
        /* recalibrate sample period and check if it's accepted by perf core */
-       if (perf_event_period(pmc->perf_event,
+       if (is_sampling_event(pmc->perf_event) &&
+           perf_event_period(pmc->perf_event,
                              get_sample_period(pmc, pmc->counter)))
                return false;
 
index 85ff3c0588bac01e0dee3edd607c33d4aca347ba..cdb91009701dd16421a84d39dd5f48b19ab32f81 100644 (file)
@@ -140,7 +140,8 @@ static inline u64 get_sample_period(struct kvm_pmc *pmc, u64 counter_value)
 
 static inline void pmc_update_sample_period(struct kvm_pmc *pmc)
 {
-       if (!pmc->perf_event || pmc->is_paused)
+       if (!pmc->perf_event || pmc->is_paused ||
+           !is_sampling_event(pmc->perf_event))
                return;
 
        perf_event_period(pmc->perf_event,
index bc9cd7086fa972a8c9dd0600f7316278da4029ec..add65dd597569e70b484b18c9453d21723aeff5f 100644 (file)
@@ -138,15 +138,13 @@ void recalc_intercepts(struct vcpu_svm *svm)
                c->intercepts[i] = h->intercepts[i];
 
        if (g->int_ctl & V_INTR_MASKING_MASK) {
-               /* We only want the cr8 intercept bits of L1 */
-               vmcb_clr_intercept(c, INTERCEPT_CR8_READ);
-               vmcb_clr_intercept(c, INTERCEPT_CR8_WRITE);
-
                /*
-                * Once running L2 with HF_VINTR_MASK, EFLAGS.IF does not
-                * affect any interrupt we may want to inject; therefore,
-                * interrupt window vmexits are irrelevant to L0.
+                * Once running L2 with HF_VINTR_MASK, EFLAGS.IF and CR8
+                * do not affect any interrupt we may want to inject;
+                * therefore, writes to CR8 are irrelevant to L0, as are
+                * interrupt window vmexits.
                 */
+               vmcb_clr_intercept(c, INTERCEPT_CR8_WRITE);
                vmcb_clr_intercept(c, INTERCEPT_VINTR);
        }
 
index b6f4411b613e9105d8cca1a989ee8f3a4a2a7197..d93c715cda6ab3002d933f6b7111375f03d4fbd5 100644 (file)
@@ -5296,10 +5296,19 @@ static int handle_vmclear(struct kvm_vcpu *vcpu)
                if (vmptr == vmx->nested.current_vmptr)
                        nested_release_vmcs12(vcpu);
 
-               kvm_vcpu_write_guest(vcpu,
-                                    vmptr + offsetof(struct vmcs12,
-                                                     launch_state),
-                                    &zero, sizeof(zero));
+               /*
+                * Silently ignore memory errors on VMCLEAR, Intel's pseudocode
+                * for VMCLEAR includes an "ensure that data for VMCS referenced
+                * by the operand is in memory" clause that guards writes to
+                * memory, i.e. doing nothing for I/O is architecturally valid.
+                *
+                * FIXME: Suppress failures if and only if no memslot is found,
+                * i.e. exit to userspace if __copy_to_user() fails.
+                */
+               (void)kvm_vcpu_write_guest(vcpu,
+                                          vmptr + offsetof(struct vmcs12,
+                                                           launch_state),
+                                          &zero, sizeof(zero));
        } else if (vmx->nested.hv_evmcs && vmptr == vmx->nested.hv_evmcs_vmptr) {
                nested_release_evmcs(vcpu);
        }
@@ -6873,7 +6882,8 @@ void nested_vmx_setup_ctls_msrs(struct vmcs_config *vmcs_conf, u32 ept_caps)
                SECONDARY_EXEC_ENABLE_INVPCID |
                SECONDARY_EXEC_RDSEED_EXITING |
                SECONDARY_EXEC_XSAVES |
-               SECONDARY_EXEC_TSC_SCALING;
+               SECONDARY_EXEC_TSC_SCALING |
+               SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE;
 
        /*
         * We can emulate "VMCS shadowing," even if the hardware
index fe5615fd8295c4f615f5fe44785f24617b042a0d..fc9008dbed33487b472e4966a20b56cbcc4c4580 100644 (file)
@@ -4459,6 +4459,13 @@ vmx_adjust_secondary_exec_control(struct vcpu_vmx *vmx, u32 *exec_control,
         * controls for features that are/aren't exposed to the guest.
         */
        if (nested) {
+               /*
+                * All features that can be added to or removed from VMX MSRs
+                * must be supported in the first place for nested virtualization.
+                */
+               if (WARN_ON_ONCE(!(vmcs_config.nested.secondary_ctls_high & control)))
+                       enabled = false;
+
                if (enabled)
                        vmx->nested.msrs.secondary_ctls_high |= control;
                else
index 312aea1854ae6b8c8b4a64884ffaf9b9605c6e29..da4bbd043a7b6c35dd1d94e193ab35c3e8148257 100644 (file)
@@ -13132,6 +13132,9 @@ int kvm_handle_memory_failure(struct kvm_vcpu *vcpu, int r,
                              struct x86_exception *e)
 {
        if (r == X86EMUL_PROPAGATE_FAULT) {
+               if (KVM_BUG_ON(!e, vcpu->kvm))
+                       return -EIO;
+
                kvm_inject_emulated_page_fault(vcpu, e);
                return 1;
        }
index d7af402402484bb008f253213a86c186224a34c1..8fd41f5deae39f605b4865382a3c5b6e58dc0467 100644 (file)
@@ -41,7 +41,7 @@ static int kvm_xen_shared_info_init(struct kvm *kvm, gfn_t gfn)
        int ret = 0;
        int idx = srcu_read_lock(&kvm->srcu);
 
-       if (gfn == GPA_INVALID) {
+       if (gfn == KVM_XEN_INVALID_GFN) {
                kvm_gpc_deactivate(gpc);
                goto out;
        }
@@ -271,7 +271,15 @@ static void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, bool atomic)
         * Attempt to obtain the GPC lock on *both* (if there are two)
         * gfn_to_pfn caches that cover the region.
         */
-       read_lock_irqsave(&gpc1->lock, flags);
+       if (atomic) {
+               local_irq_save(flags);
+               if (!read_trylock(&gpc1->lock)) {
+                       local_irq_restore(flags);
+                       return;
+               }
+       } else {
+               read_lock_irqsave(&gpc1->lock, flags);
+       }
        while (!kvm_gpc_check(gpc1, user_len1)) {
                read_unlock_irqrestore(&gpc1->lock, flags);
 
@@ -304,9 +312,18 @@ static void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, bool atomic)
                 * The guest's runstate_info is split across two pages and we
                 * need to hold and validate both GPCs simultaneously. We can
                 * declare a lock ordering GPC1 > GPC2 because nothing else
-                * takes them more than one at a time.
+                * takes them more than one at a time. Set a subclass on the
+                * gpc1 lock to make lockdep shut up about it.
                 */
-               read_lock(&gpc2->lock);
+               lock_set_subclass(&gpc1->lock.dep_map, 1, _THIS_IP_);
+               if (atomic) {
+                       if (!read_trylock(&gpc2->lock)) {
+                               read_unlock_irqrestore(&gpc1->lock, flags);
+                               return;
+                       }
+               } else {
+                       read_lock(&gpc2->lock);
+               }
 
                if (!kvm_gpc_check(gpc2, user_len2)) {
                        read_unlock(&gpc2->lock);
@@ -590,26 +607,26 @@ int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
                if (!IS_ENABLED(CONFIG_64BIT) && data->u.long_mode) {
                        r = -EINVAL;
                } else {
-                       mutex_lock(&kvm->lock);
+                       mutex_lock(&kvm->arch.xen.xen_lock);
                        kvm->arch.xen.long_mode = !!data->u.long_mode;
-                       mutex_unlock(&kvm->lock);
+                       mutex_unlock(&kvm->arch.xen.xen_lock);
                        r = 0;
                }
                break;
 
        case KVM_XEN_ATTR_TYPE_SHARED_INFO:
-               mutex_lock(&kvm->lock);
+               mutex_lock(&kvm->arch.xen.xen_lock);
                r = kvm_xen_shared_info_init(kvm, data->u.shared_info.gfn);
-               mutex_unlock(&kvm->lock);
+               mutex_unlock(&kvm->arch.xen.xen_lock);
                break;
 
        case KVM_XEN_ATTR_TYPE_UPCALL_VECTOR:
                if (data->u.vector && data->u.vector < 0x10)
                        r = -EINVAL;
                else {
-                       mutex_lock(&kvm->lock);
+                       mutex_lock(&kvm->arch.xen.xen_lock);
                        kvm->arch.xen.upcall_vector = data->u.vector;
-                       mutex_unlock(&kvm->lock);
+                       mutex_unlock(&kvm->arch.xen.xen_lock);
                        r = 0;
                }
                break;
@@ -619,9 +636,9 @@ int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
                break;
 
        case KVM_XEN_ATTR_TYPE_XEN_VERSION:
-               mutex_lock(&kvm->lock);
+               mutex_lock(&kvm->arch.xen.xen_lock);
                kvm->arch.xen.xen_version = data->u.xen_version;
-               mutex_unlock(&kvm->lock);
+               mutex_unlock(&kvm->arch.xen.xen_lock);
                r = 0;
                break;
 
@@ -630,9 +647,9 @@ int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
                        r = -EOPNOTSUPP;
                        break;
                }
-               mutex_lock(&kvm->lock);
+               mutex_lock(&kvm->arch.xen.xen_lock);
                kvm->arch.xen.runstate_update_flag = !!data->u.runstate_update_flag;
-               mutex_unlock(&kvm->lock);
+               mutex_unlock(&kvm->arch.xen.xen_lock);
                r = 0;
                break;
 
@@ -647,7 +664,7 @@ int kvm_xen_hvm_get_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
 {
        int r = -ENOENT;
 
-       mutex_lock(&kvm->lock);
+       mutex_lock(&kvm->arch.xen.xen_lock);
 
        switch (data->type) {
        case KVM_XEN_ATTR_TYPE_LONG_MODE:
@@ -659,7 +676,7 @@ int kvm_xen_hvm_get_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
                if (kvm->arch.xen.shinfo_cache.active)
                        data->u.shared_info.gfn = gpa_to_gfn(kvm->arch.xen.shinfo_cache.gpa);
                else
-                       data->u.shared_info.gfn = GPA_INVALID;
+                       data->u.shared_info.gfn = KVM_XEN_INVALID_GFN;
                r = 0;
                break;
 
@@ -686,7 +703,7 @@ int kvm_xen_hvm_get_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
                break;
        }
 
-       mutex_unlock(&kvm->lock);
+       mutex_unlock(&kvm->arch.xen.xen_lock);
        return r;
 }
 
@@ -694,7 +711,7 @@ int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
 {
        int idx, r = -ENOENT;
 
-       mutex_lock(&vcpu->kvm->lock);
+       mutex_lock(&vcpu->kvm->arch.xen.xen_lock);
        idx = srcu_read_lock(&vcpu->kvm->srcu);
 
        switch (data->type) {
@@ -705,7 +722,7 @@ int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
                BUILD_BUG_ON(offsetof(struct vcpu_info, time) !=
                             offsetof(struct compat_vcpu_info, time));
 
-               if (data->u.gpa == GPA_INVALID) {
+               if (data->u.gpa == KVM_XEN_INVALID_GPA) {
                        kvm_gpc_deactivate(&vcpu->arch.xen.vcpu_info_cache);
                        r = 0;
                        break;
@@ -719,7 +736,7 @@ int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
                break;
 
        case KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO:
-               if (data->u.gpa == GPA_INVALID) {
+               if (data->u.gpa == KVM_XEN_INVALID_GPA) {
                        kvm_gpc_deactivate(&vcpu->arch.xen.vcpu_time_info_cache);
                        r = 0;
                        break;
@@ -739,7 +756,7 @@ int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
                        r = -EOPNOTSUPP;
                        break;
                }
-               if (data->u.gpa == GPA_INVALID) {
+               if (data->u.gpa == KVM_XEN_INVALID_GPA) {
                        r = 0;
                deactivate_out:
                        kvm_gpc_deactivate(&vcpu->arch.xen.runstate_cache);
@@ -922,7 +939,7 @@ int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
        }
 
        srcu_read_unlock(&vcpu->kvm->srcu, idx);
-       mutex_unlock(&vcpu->kvm->lock);
+       mutex_unlock(&vcpu->kvm->arch.xen.xen_lock);
        return r;
 }
 
@@ -930,14 +947,14 @@ int kvm_xen_vcpu_get_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
 {
        int r = -ENOENT;
 
-       mutex_lock(&vcpu->kvm->lock);
+       mutex_lock(&vcpu->kvm->arch.xen.xen_lock);
 
        switch (data->type) {
        case KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO:
                if (vcpu->arch.xen.vcpu_info_cache.active)
                        data->u.gpa = vcpu->arch.xen.vcpu_info_cache.gpa;
                else
-                       data->u.gpa = GPA_INVALID;
+                       data->u.gpa = KVM_XEN_INVALID_GPA;
                r = 0;
                break;
 
@@ -945,7 +962,7 @@ int kvm_xen_vcpu_get_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
                if (vcpu->arch.xen.vcpu_time_info_cache.active)
                        data->u.gpa = vcpu->arch.xen.vcpu_time_info_cache.gpa;
                else
-                       data->u.gpa = GPA_INVALID;
+                       data->u.gpa = KVM_XEN_INVALID_GPA;
                r = 0;
                break;
 
@@ -1013,7 +1030,7 @@ int kvm_xen_vcpu_get_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
                break;
        }
 
-       mutex_unlock(&vcpu->kvm->lock);
+       mutex_unlock(&vcpu->kvm->arch.xen.xen_lock);
        return r;
 }
 
@@ -1069,6 +1086,7 @@ int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data)
                u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
                                  : kvm->arch.xen_hvm_config.blob_size_32;
                u8 *page;
+               int ret;
 
                if (page_num >= blob_size)
                        return 1;
@@ -1079,10 +1097,10 @@ int kvm_xen_write_hypercall_page(struct kvm_vcpu *vcpu, u64 data)
                if (IS_ERR(page))
                        return PTR_ERR(page);
 
-               if (kvm_vcpu_write_guest(vcpu, page_addr, page, PAGE_SIZE)) {
-                       kfree(page);
+               ret = kvm_vcpu_write_guest(vcpu, page_addr, page, PAGE_SIZE);
+               kfree(page);
+               if (ret)
                        return 1;
-               }
        }
        return 0;
 }
@@ -1105,7 +1123,7 @@ int kvm_xen_hvm_config(struct kvm *kvm, struct kvm_xen_hvm_config *xhc)
             xhc->blob_size_32 || xhc->blob_size_64))
                return -EINVAL;
 
-       mutex_lock(&kvm->lock);
+       mutex_lock(&kvm->arch.xen.xen_lock);
 
        if (xhc->msr && !kvm->arch.xen_hvm_config.msr)
                static_branch_inc(&kvm_xen_enabled.key);
@@ -1114,7 +1132,7 @@ int kvm_xen_hvm_config(struct kvm *kvm, struct kvm_xen_hvm_config *xhc)
 
        memcpy(&kvm->arch.xen_hvm_config, xhc, sizeof(*xhc));
 
-       mutex_unlock(&kvm->lock);
+       mutex_unlock(&kvm->arch.xen.xen_lock);
        return 0;
 }
 
@@ -1183,30 +1201,22 @@ static bool wait_pending_event(struct kvm_vcpu *vcpu, int nr_ports,
 static bool kvm_xen_schedop_poll(struct kvm_vcpu *vcpu, bool longmode,
                                 u64 param, u64 *r)
 {
-       int idx, i;
        struct sched_poll sched_poll;
        evtchn_port_t port, *ports;
-       gpa_t gpa;
+       struct x86_exception e;
+       int i;
 
        if (!lapic_in_kernel(vcpu) ||
            !(vcpu->kvm->arch.xen_hvm_config.flags & KVM_XEN_HVM_CONFIG_EVTCHN_SEND))
                return false;
 
-       idx = srcu_read_lock(&vcpu->kvm->srcu);
-       gpa = kvm_mmu_gva_to_gpa_system(vcpu, param, NULL);
-       srcu_read_unlock(&vcpu->kvm->srcu, idx);
-       if (!gpa) {
-               *r = -EFAULT;
-               return true;
-       }
-
        if (IS_ENABLED(CONFIG_64BIT) && !longmode) {
                struct compat_sched_poll sp32;
 
                /* Sanity check that the compat struct definition is correct */
                BUILD_BUG_ON(sizeof(sp32) != 16);
 
-               if (kvm_vcpu_read_guest(vcpu, gpa, &sp32, sizeof(sp32))) {
+               if (kvm_read_guest_virt(vcpu, param, &sp32, sizeof(sp32), &e)) {
                        *r = -EFAULT;
                        return true;
                }
@@ -1220,8 +1230,8 @@ static bool kvm_xen_schedop_poll(struct kvm_vcpu *vcpu, bool longmode,
                sched_poll.nr_ports = sp32.nr_ports;
                sched_poll.timeout = sp32.timeout;
        } else {
-               if (kvm_vcpu_read_guest(vcpu, gpa, &sched_poll,
-                                       sizeof(sched_poll))) {
+               if (kvm_read_guest_virt(vcpu, param, &sched_poll,
+                                       sizeof(sched_poll), &e)) {
                        *r = -EFAULT;
                        return true;
                }
@@ -1243,18 +1253,13 @@ static bool kvm_xen_schedop_poll(struct kvm_vcpu *vcpu, bool longmode,
        } else
                ports = &port;
 
+       if (kvm_read_guest_virt(vcpu, (gva_t)sched_poll.ports, ports,
+                               sched_poll.nr_ports * sizeof(*ports), &e)) {
+               *r = -EFAULT;
+               return true;
+       }
+
        for (i = 0; i < sched_poll.nr_ports; i++) {
-               idx = srcu_read_lock(&vcpu->kvm->srcu);
-               gpa = kvm_mmu_gva_to_gpa_system(vcpu,
-                                               (gva_t)(sched_poll.ports + i),
-                                               NULL);
-               srcu_read_unlock(&vcpu->kvm->srcu, idx);
-
-               if (!gpa || kvm_vcpu_read_guest(vcpu, gpa,
-                                               &ports[i], sizeof(port))) {
-                       *r = -EFAULT;
-                       goto out;
-               }
                if (ports[i] >= max_evtchn_port(vcpu->kvm)) {
                        *r = -EINVAL;
                        goto out;
@@ -1330,9 +1335,8 @@ static bool kvm_xen_hcall_vcpu_op(struct kvm_vcpu *vcpu, bool longmode, int cmd,
                                  int vcpu_id, u64 param, u64 *r)
 {
        struct vcpu_set_singleshot_timer oneshot;
+       struct x86_exception e;
        s64 delta;
-       gpa_t gpa;
-       int idx;
 
        if (!kvm_xen_timer_enabled(vcpu))
                return false;
@@ -1343,9 +1347,6 @@ static bool kvm_xen_hcall_vcpu_op(struct kvm_vcpu *vcpu, bool longmode, int cmd,
                        *r = -EINVAL;
                        return true;
                }
-               idx = srcu_read_lock(&vcpu->kvm->srcu);
-               gpa = kvm_mmu_gva_to_gpa_system(vcpu, param, NULL);
-               srcu_read_unlock(&vcpu->kvm->srcu, idx);
 
                /*
                 * The only difference for 32-bit compat is the 4 bytes of
@@ -1363,9 +1364,8 @@ static bool kvm_xen_hcall_vcpu_op(struct kvm_vcpu *vcpu, bool longmode, int cmd,
                BUILD_BUG_ON(sizeof_field(struct compat_vcpu_set_singleshot_timer, flags) !=
                             sizeof_field(struct vcpu_set_singleshot_timer, flags));
 
-               if (!gpa ||
-                   kvm_vcpu_read_guest(vcpu, gpa, &oneshot, longmode ? sizeof(oneshot) :
-                                       sizeof(struct compat_vcpu_set_singleshot_timer))) {
+               if (kvm_read_guest_virt(vcpu, param, &oneshot, longmode ? sizeof(oneshot) :
+                                       sizeof(struct compat_vcpu_set_singleshot_timer), &e)) {
                        *r = -EFAULT;
                        return true;
                }
@@ -1675,15 +1675,7 @@ static int kvm_xen_set_evtchn(struct kvm_xen_evtchn *xe, struct kvm *kvm)
                mm_borrowed = true;
        }
 
-       /*
-        * For the irqfd workqueue, using the main kvm->lock mutex is
-        * fine since this function is invoked from kvm_set_irq() with
-        * no other lock held, no srcu. In future if it will be called
-        * directly from a vCPU thread (e.g. on hypercall for an IPI)
-        * then it may need to switch to using a leaf-node mutex for
-        * serializing the shared_info mapping.
-        */
-       mutex_lock(&kvm->lock);
+       mutex_lock(&kvm->arch.xen.xen_lock);
 
        /*
         * It is theoretically possible for the page to be unmapped
@@ -1712,7 +1704,7 @@ static int kvm_xen_set_evtchn(struct kvm_xen_evtchn *xe, struct kvm *kvm)
                srcu_read_unlock(&kvm->srcu, idx);
        } while(!rc);
 
-       mutex_unlock(&kvm->lock);
+       mutex_unlock(&kvm->arch.xen.xen_lock);
 
        if (mm_borrowed)
                kthread_unuse_mm(kvm->mm);
@@ -1825,20 +1817,20 @@ static int kvm_xen_eventfd_update(struct kvm *kvm,
 {
        u32 port = data->u.evtchn.send_port;
        struct evtchnfd *evtchnfd;
+       int ret;
 
-       if (!port || port >= max_evtchn_port(kvm))
-               return -EINVAL;
-
-       mutex_lock(&kvm->lock);
+       /* Protect writes to evtchnfd as well as the idr lookup.  */
+       mutex_lock(&kvm->arch.xen.xen_lock);
        evtchnfd = idr_find(&kvm->arch.xen.evtchn_ports, port);
-       mutex_unlock(&kvm->lock);
 
+       ret = -ENOENT;
        if (!evtchnfd)
-               return -ENOENT;
+               goto out_unlock;
 
        /* For an UPDATE, nothing may change except the priority/vcpu */
+       ret = -EINVAL;
        if (evtchnfd->type != data->u.evtchn.type)
-               return -EINVAL;
+               goto out_unlock;
 
        /*
         * Port cannot change, and if it's zero that was an eventfd
@@ -1846,20 +1838,21 @@ static int kvm_xen_eventfd_update(struct kvm *kvm,
         */
        if (!evtchnfd->deliver.port.port ||
            evtchnfd->deliver.port.port != data->u.evtchn.deliver.port.port)
-               return -EINVAL;
+               goto out_unlock;
 
        /* We only support 2 level event channels for now */
        if (data->u.evtchn.deliver.port.priority != KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL)
-               return -EINVAL;
+               goto out_unlock;
 
-       mutex_lock(&kvm->lock);
        evtchnfd->deliver.port.priority = data->u.evtchn.deliver.port.priority;
        if (evtchnfd->deliver.port.vcpu_id != data->u.evtchn.deliver.port.vcpu) {
                evtchnfd->deliver.port.vcpu_id = data->u.evtchn.deliver.port.vcpu;
                evtchnfd->deliver.port.vcpu_idx = -1;
        }
-       mutex_unlock(&kvm->lock);
-       return 0;
+       ret = 0;
+out_unlock:
+       mutex_unlock(&kvm->arch.xen.xen_lock);
+       return ret;
 }
 
 /*
@@ -1871,12 +1864,9 @@ static int kvm_xen_eventfd_assign(struct kvm *kvm,
 {
        u32 port = data->u.evtchn.send_port;
        struct eventfd_ctx *eventfd = NULL;
-       struct evtchnfd *evtchnfd = NULL;
+       struct evtchnfd *evtchnfd;
        int ret = -EINVAL;
 
-       if (!port || port >= max_evtchn_port(kvm))
-               return -EINVAL;
-
        evtchnfd = kzalloc(sizeof(struct evtchnfd), GFP_KERNEL);
        if (!evtchnfd)
                return -ENOMEM;
@@ -1924,10 +1914,10 @@ static int kvm_xen_eventfd_assign(struct kvm *kvm,
                evtchnfd->deliver.port.priority = data->u.evtchn.deliver.port.priority;
        }
 
-       mutex_lock(&kvm->lock);
+       mutex_lock(&kvm->arch.xen.xen_lock);
        ret = idr_alloc(&kvm->arch.xen.evtchn_ports, evtchnfd, port, port + 1,
                        GFP_KERNEL);
-       mutex_unlock(&kvm->lock);
+       mutex_unlock(&kvm->arch.xen.xen_lock);
        if (ret >= 0)
                return 0;
 
@@ -1945,15 +1935,14 @@ static int kvm_xen_eventfd_deassign(struct kvm *kvm, u32 port)
 {
        struct evtchnfd *evtchnfd;
 
-       mutex_lock(&kvm->lock);
+       mutex_lock(&kvm->arch.xen.xen_lock);
        evtchnfd = idr_remove(&kvm->arch.xen.evtchn_ports, port);
-       mutex_unlock(&kvm->lock);
+       mutex_unlock(&kvm->arch.xen.xen_lock);
 
        if (!evtchnfd)
                return -ENOENT;
 
-       if (kvm)
-               synchronize_srcu(&kvm->srcu);
+       synchronize_srcu(&kvm->srcu);
        if (!evtchnfd->deliver.port.port)
                eventfd_ctx_put(evtchnfd->deliver.eventfd.ctx);
        kfree(evtchnfd);
@@ -1962,18 +1951,42 @@ static int kvm_xen_eventfd_deassign(struct kvm *kvm, u32 port)
 
 static int kvm_xen_eventfd_reset(struct kvm *kvm)
 {
-       struct evtchnfd *evtchnfd;
+       struct evtchnfd *evtchnfd, **all_evtchnfds;
        int i;
+       int n = 0;
+
+       mutex_lock(&kvm->arch.xen.xen_lock);
 
-       mutex_lock(&kvm->lock);
+       /*
+        * Because synchronize_srcu() cannot be called inside the
+        * critical section, first collect all the evtchnfd objects
+        * in an array as they are removed from evtchn_ports.
+        */
+       idr_for_each_entry(&kvm->arch.xen.evtchn_ports, evtchnfd, i)
+               n++;
+
+       all_evtchnfds = kmalloc_array(n, sizeof(struct evtchnfd *), GFP_KERNEL);
+       if (!all_evtchnfds) {
+               mutex_unlock(&kvm->arch.xen.xen_lock);
+               return -ENOMEM;
+       }
+
+       n = 0;
        idr_for_each_entry(&kvm->arch.xen.evtchn_ports, evtchnfd, i) {
+               all_evtchnfds[n++] = evtchnfd;
                idr_remove(&kvm->arch.xen.evtchn_ports, evtchnfd->send_port);
-               synchronize_srcu(&kvm->srcu);
+       }
+       mutex_unlock(&kvm->arch.xen.xen_lock);
+
+       synchronize_srcu(&kvm->srcu);
+
+       while (n--) {
+               evtchnfd = all_evtchnfds[n];
                if (!evtchnfd->deliver.port.port)
                        eventfd_ctx_put(evtchnfd->deliver.eventfd.ctx);
                kfree(evtchnfd);
        }
-       mutex_unlock(&kvm->lock);
+       kfree(all_evtchnfds);
 
        return 0;
 }
@@ -2002,20 +2015,22 @@ static bool kvm_xen_hcall_evtchn_send(struct kvm_vcpu *vcpu, u64 param, u64 *r)
 {
        struct evtchnfd *evtchnfd;
        struct evtchn_send send;
-       gpa_t gpa;
-       int idx;
+       struct x86_exception e;
 
-       idx = srcu_read_lock(&vcpu->kvm->srcu);
-       gpa = kvm_mmu_gva_to_gpa_system(vcpu, param, NULL);
-       srcu_read_unlock(&vcpu->kvm->srcu, idx);
-
-       if (!gpa || kvm_vcpu_read_guest(vcpu, gpa, &send, sizeof(send))) {
+       /* Sanity check: this structure is the same for 32-bit and 64-bit */
+       BUILD_BUG_ON(sizeof(send) != 4);
+       if (kvm_read_guest_virt(vcpu, param, &send, sizeof(send), &e)) {
                *r = -EFAULT;
                return true;
        }
 
-       /* The evtchn_ports idr is protected by vcpu->kvm->srcu */
+       /*
+        * evtchnfd is protected by kvm->srcu; the idr lookup instead
+        * is protected by RCU.
+        */
+       rcu_read_lock();
        evtchnfd = idr_find(&vcpu->kvm->arch.xen.evtchn_ports, send.port);
+       rcu_read_unlock();
        if (!evtchnfd)
                return false;
 
@@ -2063,6 +2078,7 @@ void kvm_xen_destroy_vcpu(struct kvm_vcpu *vcpu)
 
 void kvm_xen_init_vm(struct kvm *kvm)
 {
+       mutex_init(&kvm->arch.xen.xen_lock);
        idr_init(&kvm->arch.xen.evtchn_ports);
        kvm_gpc_init(&kvm->arch.xen.shinfo_cache, kvm, NULL, KVM_HOST_USES_PFN);
 }
index 21104c41cba04868e54d14f93a3592b9fa2541ba..558a605929db52f0bd05e8b547413d5d95e7eb90 100644 (file)
@@ -1595,16 +1595,16 @@ bool insn_decode_from_regs(struct insn *insn, struct pt_regs *regs,
  * Returns:
  *
  * Type of the instruction. Size of the memory operand is stored in
- * @bytes. If decode failed, MMIO_DECODE_FAILED returned.
+ * @bytes. If decode failed, INSN_MMIO_DECODE_FAILED returned.
  */
-enum mmio_type insn_decode_mmio(struct insn *insn, int *bytes)
+enum insn_mmio_type insn_decode_mmio(struct insn *insn, int *bytes)
 {
-       enum mmio_type type = MMIO_DECODE_FAILED;
+       enum insn_mmio_type type = INSN_MMIO_DECODE_FAILED;
 
        *bytes = 0;
 
        if (insn_get_opcode(insn))
-               return MMIO_DECODE_FAILED;
+               return INSN_MMIO_DECODE_FAILED;
 
        switch (insn->opcode.bytes[0]) {
        case 0x88: /* MOV m8,r8 */
@@ -1613,7 +1613,7 @@ enum mmio_type insn_decode_mmio(struct insn *insn, int *bytes)
        case 0x89: /* MOV m16/m32/m64, r16/m32/m64 */
                if (!*bytes)
                        *bytes = insn->opnd_bytes;
-               type = MMIO_WRITE;
+               type = INSN_MMIO_WRITE;
                break;
 
        case 0xc6: /* MOV m8, imm8 */
@@ -1622,7 +1622,7 @@ enum mmio_type insn_decode_mmio(struct insn *insn, int *bytes)
        case 0xc7: /* MOV m16/m32/m64, imm16/imm32/imm64 */
                if (!*bytes)
                        *bytes = insn->opnd_bytes;
-               type = MMIO_WRITE_IMM;
+               type = INSN_MMIO_WRITE_IMM;
                break;
 
        case 0x8a: /* MOV r8, m8 */
@@ -1631,7 +1631,7 @@ enum mmio_type insn_decode_mmio(struct insn *insn, int *bytes)
        case 0x8b: /* MOV r16/r32/r64, m16/m32/m64 */
                if (!*bytes)
                        *bytes = insn->opnd_bytes;
-               type = MMIO_READ;
+               type = INSN_MMIO_READ;
                break;
 
        case 0xa4: /* MOVS m8, m8 */
@@ -1640,7 +1640,7 @@ enum mmio_type insn_decode_mmio(struct insn *insn, int *bytes)
        case 0xa5: /* MOVS m16/m32/m64, m16/m32/m64 */
                if (!*bytes)
                        *bytes = insn->opnd_bytes;
-               type = MMIO_MOVS;
+               type = INSN_MMIO_MOVS;
                break;
 
        case 0x0f: /* Two-byte instruction */
@@ -1651,7 +1651,7 @@ enum mmio_type insn_decode_mmio(struct insn *insn, int *bytes)
                case 0xb7: /* MOVZX r32/r64, m16 */
                        if (!*bytes)
                                *bytes = 2;
-                       type = MMIO_READ_ZERO_EXTEND;
+                       type = INSN_MMIO_READ_ZERO_EXTEND;
                        break;
 
                case 0xbe: /* MOVSX r16/r32/r64, m8 */
@@ -1660,7 +1660,7 @@ enum mmio_type insn_decode_mmio(struct insn *insn, int *bytes)
                case 0xbf: /* MOVSX r32/r64, m16 */
                        if (!*bytes)
                                *bytes = 2;
-                       type = MMIO_READ_SIGN_EXTEND;
+                       type = INSN_MMIO_READ_SIGN_EXTEND;
                        break;
                }
                break;
index a1f9416bf67a53df1f99f1315c48d78167f06060..6ff2f56cb0f71ae8a9066157c31babe112fc57aa 100644 (file)
@@ -10,6 +10,6 @@
  */
 SYM_FUNC_START(__iowrite32_copy)
        movl %edx,%ecx
-       rep movsd
+       rep movsl
        RET
 SYM_FUNC_END(__iowrite32_copy)
index d3987359d44140ae67c21bdc0798c210dc10fcf1..cb258f58fdc87935500c28053eef87ce1b02e206 100644 (file)
@@ -26,6 +26,7 @@
 #include <asm/pti.h>
 #include <asm/text-patching.h>
 #include <asm/memtype.h>
+#include <asm/paravirt.h>
 
 /*
  * We need to define the tracepoints somewhere, and tlb.c
@@ -804,6 +805,9 @@ void __init poking_init(void)
        poking_mm = mm_alloc();
        BUG_ON(!poking_mm);
 
+       /* Xen PV guests need the PGD to be pinned. */
+       paravirt_arch_dup_mmap(NULL, poking_mm);
+
        /*
         * Randomize the poking address, but make sure that the following page
         * will be mapped at the same PMD. We need 2 pages, so find space for 3,
index 46de9cf5c91d2be26a72afdd4de86b87c2b2c3c2..fb4b1b5e0deab2fcbd1f1a2442431fe63adbbc24 100644 (file)
@@ -387,7 +387,8 @@ static unsigned long pat_x_mtrr_type(u64 start, u64 end,
                u8 mtrr_type, uniform;
 
                mtrr_type = mtrr_type_lookup(start, end, &uniform);
-               if (mtrr_type != MTRR_TYPE_WRBACK)
+               if (mtrr_type != MTRR_TYPE_WRBACK &&
+                   mtrr_type != MTRR_TYPE_INVALID)
                        return _PAGE_CACHE_MODE_UC_MINUS;
 
                return _PAGE_CACHE_MODE_WB;
index 758cbfe55daa35519b0360c970cce25532598a9b..4b3efaa82ab7c14a747cea8dbe50ad6645cf1981 100644 (file)
@@ -12,6 +12,7 @@
  */
 
 #include <linux/acpi.h>
+#include <linux/efi.h>
 #include <linux/pci.h>
 #include <linux/init.h>
 #include <linux/bitmap.h>
@@ -442,17 +443,42 @@ static bool is_acpi_reserved(u64 start, u64 end, enum e820_type not_used)
        return mcfg_res.flags;
 }
 
+static bool is_efi_mmio(u64 start, u64 end, enum e820_type not_used)
+{
+#ifdef CONFIG_EFI
+       efi_memory_desc_t *md;
+       u64 size, mmio_start, mmio_end;
+
+       for_each_efi_memory_desc(md) {
+               if (md->type == EFI_MEMORY_MAPPED_IO) {
+                       size = md->num_pages << EFI_PAGE_SHIFT;
+                       mmio_start = md->phys_addr;
+                       mmio_end = mmio_start + size;
+
+                       /*
+                        * N.B. Caller supplies (start, start + size),
+                        * so to match, mmio_end is the first address
+                        * *past* the EFI_MEMORY_MAPPED_IO area.
+                        */
+                       if (mmio_start <= start && end <= mmio_end)
+                               return true;
+               }
+       }
+#endif
+
+       return false;
+}
+
 typedef bool (*check_reserved_t)(u64 start, u64 end, enum e820_type type);
 
 static bool __ref is_mmconf_reserved(check_reserved_t is_reserved,
                                     struct pci_mmcfg_region *cfg,
-                                    struct device *dev, int with_e820)
+                                    struct device *dev, const char *method)
 {
        u64 addr = cfg->res.start;
        u64 size = resource_size(&cfg->res);
        u64 old_size = size;
        int num_buses;
-       char *method = with_e820 ? "E820" : "ACPI motherboard resources";
 
        while (!is_reserved(addr, addr + size, E820_TYPE_RESERVED)) {
                size >>= 1;
@@ -464,10 +490,10 @@ static bool __ref is_mmconf_reserved(check_reserved_t is_reserved,
                return false;
 
        if (dev)
-               dev_info(dev, "MMCONFIG at %pR reserved in %s\n",
+               dev_info(dev, "MMCONFIG at %pR reserved as %s\n",
                         &cfg->res, method);
        else
-               pr_info(PREFIX "MMCONFIG at %pR reserved in %s\n",
+               pr_info(PREFIX "MMCONFIG at %pR reserved as %s\n",
                       &cfg->res, method);
 
        if (old_size != size) {
@@ -500,7 +526,8 @@ static bool __ref
 pci_mmcfg_check_reserved(struct device *dev, struct pci_mmcfg_region *cfg, int early)
 {
        if (!early && !acpi_disabled) {
-               if (is_mmconf_reserved(is_acpi_reserved, cfg, dev, 0))
+               if (is_mmconf_reserved(is_acpi_reserved, cfg, dev,
+                                      "ACPI motherboard resource"))
                        return true;
 
                if (dev)
@@ -513,6 +540,10 @@ pci_mmcfg_check_reserved(struct device *dev, struct pci_mmcfg_region *cfg, int e
                               "MMCONFIG at %pR not reserved in "
                               "ACPI motherboard resources\n",
                               &cfg->res);
+
+               if (is_mmconf_reserved(is_efi_mmio, cfg, dev,
+                                      "EfiMemoryMappedIO"))
+                       return true;
        }
 
        /*
@@ -527,7 +558,8 @@ pci_mmcfg_check_reserved(struct device *dev, struct pci_mmcfg_region *cfg, int e
        /* Don't try to do this check unless configuration
           type 1 is available. how about type 2 ?*/
        if (raw_pci_ops)
-               return is_mmconf_reserved(e820__mapped_all, cfg, dev, 1);
+               return is_mmconf_reserved(e820__mapped_all, cfg, dev,
+                                         "E820 entry");
 
        return false;
 }
index 48a3eb09d95165b4baea6e2cc8f00071e032ec76..650cdbbdaf45e5b2bb77605e60a9ccb52d397fee 100644 (file)
@@ -7,7 +7,7 @@
 #include <asm/elf.h>
 
 
-Elf32_Half elf_core_extra_phdrs(void)
+Elf32_Half elf_core_extra_phdrs(struct coredump_params *cprm)
 {
        return vsyscall_ehdr ? (((struct elfhdr *)vsyscall_ehdr)->e_phnum) : 0;
 }
@@ -60,7 +60,7 @@ int elf_core_write_extra_data(struct coredump_params *cprm)
        return 1;
 }
 
-size_t elf_core_extra_data_size(void)
+size_t elf_core_extra_data_size(struct coredump_params *cprm)
 {
        if ( vsyscall_ehdr ) {
                const struct elfhdr *const ehdrp =
index 58db86f7b3846ef04b749154f0c9e9178654a806..9bdc3b656b2c49ac1bc1323b3e6ec4bf0ec418af 100644 (file)
@@ -134,11 +134,6 @@ static inline unsigned p2m_mid_index(unsigned long pfn)
        return (pfn / P2M_PER_PAGE) % P2M_MID_PER_PAGE;
 }
 
-static inline unsigned p2m_index(unsigned long pfn)
-{
-       return pfn % P2M_PER_PAGE;
-}
-
 static void p2m_top_mfn_init(unsigned long *top)
 {
        unsigned i;
index 228e4dff5fb2d3b7fb08e59743915d8223940dc8..a6d09fe0483110e46aaa8c48333d52677e2c86c0 100644 (file)
@@ -154,11 +154,6 @@ struct thread_struct {
        unsigned long ra; /* kernel's a0: return address and window call size */
        unsigned long sp; /* kernel's a1: stack pointer */
 
-       /* struct xtensa_cpuinfo info; */
-
-       unsigned long bad_vaddr; /* last user fault */
-       unsigned long bad_uaddr; /* last kernel fault accessing user space */
-       unsigned long error_code;
 #ifdef CONFIG_HAVE_HW_BREAKPOINT
        struct perf_event *ptrace_bp[XCHAL_NUM_IBREAK];
        struct perf_event *ptrace_wp[XCHAL_NUM_DBREAK];
@@ -176,10 +171,6 @@ struct thread_struct {
 {                                                                      \
        ra:             0,                                              \
        sp:             sizeof(init_stack) + (long) &init_stack,        \
-       /*info:         {0}, */                                         \
-       bad_vaddr:      0,                                              \
-       bad_uaddr:      0,                                              \
-       error_code:     0,                                              \
 }
 
 
index 0c25e035ff107b83f40c659e4ee1d7caf419910e..cd98366a9b238841c7e4aaa021fb623d3eb52136 100644 (file)
@@ -362,8 +362,6 @@ static void do_unaligned_user(struct pt_regs *regs)
        __die_if_kernel("Unhandled unaligned exception in kernel",
                        regs, SIGKILL);
 
-       current->thread.bad_vaddr = regs->excvaddr;
-       current->thread.error_code = -3;
        pr_info_ratelimited("Unaligned memory access to %08lx in '%s' "
                            "(pid = %d, pc = %#010lx)\n",
                            regs->excvaddr, current->comm,
index 8c781b05c0bdd9d55d62ff456c8150b94cf9d493..faf7cf35a0ee3d244f60332ad967da9e6d500142 100644 (file)
@@ -206,8 +206,6 @@ good_area:
 bad_area:
        mmap_read_unlock(mm);
        if (user_mode(regs)) {
-               current->thread.bad_vaddr = address;
-               current->thread.error_code = is_write;
                force_sig_fault(SIGSEGV, code, (void *) address);
                return;
        }
@@ -232,7 +230,6 @@ do_sigbus:
        /* Send a sigbus, regardless of whether we were in kernel
         * or user mode.
         */
-       current->thread.bad_vaddr = address;
        force_sig_fault(SIGBUS, BUS_ADRERR, (void *) address);
 
        /* Kernel mode? Handle exceptions or die */
@@ -252,7 +249,6 @@ bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
        if ((entry = search_exception_tables(regs->pc)) != NULL) {
                pr_debug("%s: Exception at pc=%#010lx (%lx)\n",
                         current->comm, regs->pc, entry->fixup);
-               current->thread.bad_uaddr = address;
                regs->pc = entry->fixup;
                return;
        }
index 444c5ab3b67e2fe1c3bed6989a819d0a02127604..5d9d9c84d51657f1c6d9e5b25bd3cac406308d76 100644 (file)
@@ -6,7 +6,6 @@ menuconfig BLOCK
        bool "Enable the block layer" if EXPERT
        default y
        select SBITMAP
-       select SRCU
        help
         Provide block layer support for the kernel.
 
index 1b2829e99dad02cab2b04a417d87f9481023053e..7d9b15f0dbd57917aa7d91f42b61ea9c305be2df 100644 (file)
@@ -316,14 +316,12 @@ struct bfq_group *bfqq_group(struct bfq_queue *bfqq)
 
 static void bfqg_get(struct bfq_group *bfqg)
 {
-       bfqg->ref++;
+       refcount_inc(&bfqg->ref);
 }
 
 static void bfqg_put(struct bfq_group *bfqg)
 {
-       bfqg->ref--;
-
-       if (bfqg->ref == 0)
+       if (refcount_dec_and_test(&bfqg->ref))
                kfree(bfqg);
 }
 
@@ -530,7 +528,7 @@ static struct blkg_policy_data *bfq_pd_alloc(gfp_t gfp, struct request_queue *q,
        }
 
        /* see comments in bfq_bic_update_cgroup for why refcounting */
-       bfqg_get(bfqg);
+       refcount_set(&bfqg->ref, 1);
        return &bfqg->pd;
 }
 
index 16f43bbc575a0aa167bfc2268057dda90cb85e51..ccf2204477a57b44a7522fa299dca123d9041278 100644 (file)
@@ -5317,8 +5317,8 @@ static void bfq_exit_icq_bfqq(struct bfq_io_cq *bic, bool is_sync)
                unsigned long flags;
 
                spin_lock_irqsave(&bfqd->lock, flags);
-               bfq_exit_bfqq(bfqd, bfqq);
                bic_set_bfqq(bic, NULL, is_sync);
+               bfq_exit_bfqq(bfqd, bfqq);
                spin_unlock_irqrestore(&bfqd->lock, flags);
        }
 }
index 41aa151ccc223457621abb30e24ec8a02701346b..466e4865ace63d1f12f897686442652da6e615a2 100644 (file)
@@ -928,7 +928,7 @@ struct bfq_group {
        char blkg_path[128];
 
        /* reference counter (see comments in bfq_bic_update_cgroup) */
-       int ref;
+       refcount_t ref;
        /* Is bfq_group still online? */
        bool online;
 
index 5f96fcae3f7549e99664bba9dedac6b43c3d7f96..ab59a491a883e3b657edc27b8f8cfd042843f9e0 100644 (file)
@@ -1401,6 +1401,27 @@ void __bio_advance(struct bio *bio, unsigned bytes)
 }
 EXPORT_SYMBOL(__bio_advance);
 
+void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
+                       struct bio *src, struct bvec_iter *src_iter)
+{
+       while (src_iter->bi_size && dst_iter->bi_size) {
+               struct bio_vec src_bv = bio_iter_iovec(src, *src_iter);
+               struct bio_vec dst_bv = bio_iter_iovec(dst, *dst_iter);
+               unsigned int bytes = min(src_bv.bv_len, dst_bv.bv_len);
+               void *src_buf = bvec_kmap_local(&src_bv);
+               void *dst_buf = bvec_kmap_local(&dst_bv);
+
+               memcpy(dst_buf, src_buf, bytes);
+
+               kunmap_local(dst_buf);
+               kunmap_local(src_buf);
+
+               bio_advance_iter_single(src, src_iter, bytes);
+               bio_advance_iter_single(dst, dst_iter, bytes);
+       }
+}
+EXPORT_SYMBOL(bio_copy_data_iter);
+
 /**
  * bio_copy_data - copy contents of data buffers from one bio to another
  * @src: source bio
@@ -1414,21 +1435,7 @@ void bio_copy_data(struct bio *dst, struct bio *src)
        struct bvec_iter src_iter = src->bi_iter;
        struct bvec_iter dst_iter = dst->bi_iter;
 
-       while (src_iter.bi_size && dst_iter.bi_size) {
-               struct bio_vec src_bv = bio_iter_iovec(src, src_iter);
-               struct bio_vec dst_bv = bio_iter_iovec(dst, dst_iter);
-               unsigned int bytes = min(src_bv.bv_len, dst_bv.bv_len);
-               void *src_buf = bvec_kmap_local(&src_bv);
-               void *dst_buf = bvec_kmap_local(&dst_bv);
-
-               memcpy(dst_buf, src_buf, bytes);
-
-               kunmap_local(dst_buf);
-               kunmap_local(src_buf);
-
-               bio_advance_iter_single(src, &src_iter, bytes);
-               bio_advance_iter_single(dst, &dst_iter, bytes);
-       }
+       bio_copy_data_iter(dst, &dst_iter, src, &src_iter);
 }
 EXPORT_SYMBOL(bio_copy_data);
 
index ce6a2b7d3dfb2b405e013393ffe8e90e92be504e..4c94a6560f625ebde92cfc95fa26337c5b2c60ed 100644 (file)
@@ -1455,6 +1455,10 @@ retry:
                list_for_each_entry_reverse(blkg, &q->blkg_list, q_node)
                        pol->pd_init_fn(blkg->pd[pol->plid]);
 
+       if (pol->pd_online_fn)
+               list_for_each_entry_reverse(blkg, &q->blkg_list, q_node)
+                       pol->pd_online_fn(blkg->pd[pol->plid]);
+
        __set_bit(pol->plid, q->blkcg_pols);
        ret = 0;
 
index 9321767470dc5f5b3bcfd2939851a15a6dffe2c2..b5098355d8b27349d3ac08b6728afbd392fe2725 100644 (file)
@@ -283,12 +283,9 @@ static void blk_free_queue(struct request_queue *q)
  *
  * Decrements the refcount of the request_queue and free it when the refcount
  * reaches 0.
- *
- * Context: Can sleep.
  */
 void blk_put_queue(struct request_queue *q)
 {
-       might_sleep();
        if (refcount_dec_and_test(&q->refs))
                blk_free_queue(q);
 }
index 35a8f75cc45d1c7c8e63ecad419a19368a4e0260..b7c193d67185de9dc6b3a320fe49272229ebc32e 100644 (file)
@@ -309,6 +309,16 @@ static struct bio *bio_split_rw(struct bio *bio, const struct queue_limits *lim,
        *segs = nsegs;
        return NULL;
 split:
+       /*
+        * We can't sanely support splitting for a REQ_NOWAIT bio. End it
+        * with EAGAIN if splitting is required and return an error pointer.
+        */
+       if (bio->bi_opf & REQ_NOWAIT) {
+               bio->bi_status = BLK_STS_AGAIN;
+               bio_endio(bio);
+               return ERR_PTR(-EAGAIN);
+       }
+
        *segs = nsegs;
 
        /*
@@ -358,11 +368,13 @@ struct bio *__bio_split_to_limits(struct bio *bio,
        default:
                split = bio_split_rw(bio, lim, nr_segs, bs,
                                get_max_io_size(bio, lim) << SECTOR_SHIFT);
+               if (IS_ERR(split))
+                       return NULL;
                break;
        }
 
        if (split) {
-               /* there isn't chance to merge the splitted bio */
+               /* there isn't chance to merge the split bio */
                split->bi_opf |= REQ_NOMERGE;
 
                blkcg_bio_issue_init(split);
index c5cf0dbca1db8dcae3930451b0ba56161e91753c..9d463f7563bc5cbebcdd2a22114046c6b4306e15 100644 (file)
@@ -2890,6 +2890,7 @@ static inline struct request *blk_mq_get_cached_request(struct request_queue *q,
                struct blk_plug *plug, struct bio **bio, unsigned int nsegs)
 {
        struct request *rq;
+       enum hctx_type type, hctx_type;
 
        if (!plug)
                return NULL;
@@ -2902,7 +2903,10 @@ static inline struct request *blk_mq_get_cached_request(struct request_queue *q,
                return NULL;
        }
 
-       if (blk_mq_get_hctx_type((*bio)->bi_opf) != rq->mq_hctx->type)
+       type = blk_mq_get_hctx_type((*bio)->bi_opf);
+       hctx_type = rq->mq_hctx->type;
+       if (type != hctx_type &&
+           !(type == HCTX_TYPE_READ && hctx_type == HCTX_TYPE_DEFAULT))
                return NULL;
        if (op_is_flush(rq->cmd_flags) != op_is_flush((*bio)->bi_opf))
                return NULL;
@@ -2951,8 +2955,11 @@ void blk_mq_submit_bio(struct bio *bio)
        blk_status_t ret;
 
        bio = blk_queue_bounce(bio, q);
-       if (bio_may_exceed_limits(bio, &q->limits))
+       if (bio_may_exceed_limits(bio, &q->limits)) {
                bio = __bio_split_to_limits(bio, &q->limits, &nr_segs);
+               if (!bio)
+                       return;
+       }
 
        if (!bio_integrity_prep(bio))
                return;
index ab3cbe44196f77c1b69a6847813997d3396e96b0..23cf83b3331cdea5c916fbf01cb5b92aeb2f7cf8 100644 (file)
@@ -1201,10 +1201,21 @@ struct class block_class = {
        .dev_uevent     = block_uevent,
 };
 
+static char *block_devnode(struct device *dev, umode_t *mode,
+                          kuid_t *uid, kgid_t *gid)
+{
+       struct gendisk *disk = dev_to_disk(dev);
+
+       if (disk->fops->devnode)
+               return disk->fops->devnode(disk, mode);
+       return NULL;
+}
+
 const struct device_type disk_type = {
        .name           = "disk",
        .groups         = disk_attr_groups,
        .release        = disk_release,
+       .devnode        = block_devnode,
 };
 
 #ifdef CONFIG_PROC_FS
index 08cf8a17754bb6147e85d5932a063efd446ed29a..07373b3debd1e61efd293d51598545769503f29f 100644 (file)
@@ -354,6 +354,9 @@ void spk_ttyio_release(struct spk_synth *in_synth)
 {
        struct tty_struct *tty = in_synth->dev;
 
+       if (tty == NULL)
+               return;
+
        tty_lock(tty);
 
        if (tty->ops->close)
index 30d8fd03fec7ce783d04ee1e9f05c790f853b52a..97b711e57bff457bbfec645aeee7ca2efd668229 100644 (file)
@@ -70,11 +70,7 @@ module_param(device_id_scheme, bool, 0444);
 static int only_lcd = -1;
 module_param(only_lcd, int, 0444);
 
-/*
- * Display probing is known to take up to 5 seconds, so delay the fallback
- * backlight registration by 5 seconds + 3 seconds for some extra margin.
- */
-static int register_backlight_delay = 8;
+static int register_backlight_delay;
 module_param(register_backlight_delay, int, 0444);
 MODULE_PARM_DESC(register_backlight_delay,
        "Delay in seconds before doing fallback (non GPU driver triggered) "
@@ -2176,6 +2172,17 @@ static bool should_check_lcd_flag(void)
        return false;
 }
 
+/*
+ * At least one graphics driver has reported that no LCD is connected
+ * via the native interface. cancel the registration for fallback acpi_video0.
+ * If another driver still deems this necessary, it can explicitly register it.
+ */
+void acpi_video_report_nolcd(void)
+{
+       cancel_delayed_work(&video_bus_register_backlight_work);
+}
+EXPORT_SYMBOL(acpi_video_report_nolcd);
+
 int acpi_video_register(void)
 {
        int ret = 0;
index 204fe94c7e458c725627422f4eb7bea9a110cf1e..a194f30876c59a9f8d09fca5aec15b40e062546d 100644 (file)
@@ -75,7 +75,8 @@ static struct acpi_bus_type *acpi_get_bus_type(struct device *dev)
 }
 
 #define FIND_CHILD_MIN_SCORE   1
-#define FIND_CHILD_MAX_SCORE   2
+#define FIND_CHILD_MID_SCORE   2
+#define FIND_CHILD_MAX_SCORE   3
 
 static int match_any(struct acpi_device *adev, void *not_used)
 {
@@ -96,8 +97,17 @@ static int find_child_checks(struct acpi_device *adev, bool check_children)
                return -ENODEV;
 
        status = acpi_evaluate_integer(adev->handle, "_STA", NULL, &sta);
-       if (status == AE_NOT_FOUND)
+       if (status == AE_NOT_FOUND) {
+               /*
+                * Special case: backlight device objects without _STA are
+                * preferred to other objects with the same _ADR value, because
+                * it is more likely that they are actually useful.
+                */
+               if (adev->pnp.type.backlight)
+                       return FIND_CHILD_MID_SCORE;
+
                return FIND_CHILD_MIN_SCORE;
+       }
 
        if (ACPI_FAILURE(status) || !(sta & ACPI_STA_DEVICE_ENABLED))
                return -ENODEV;
index 998101cf16e4714552d3921681a7fe0c53338926..3d4c4620f9f953096dcf265ccf9c36c8c1cf15da 100644 (file)
@@ -236,6 +236,11 @@ static acpi_status acpi_platformrt_space_handler(u32 function,
        efi_status_t status;
        struct prm_context_buffer context;
 
+       if (!efi_enabled(EFI_RUNTIME_SERVICES)) {
+               pr_err_ratelimited("PRM: EFI runtime services no longer available\n");
+               return AE_NO_HANDLER;
+       }
+
        /*
         * The returned acpi_status will always be AE_OK. Error values will be
         * saved in the first byte of the PRM message buffer to be used by ASL.
@@ -325,6 +330,11 @@ void __init init_prmt(void)
 
        pr_info("PRM: found %u modules\n", mc);
 
+       if (!efi_enabled(EFI_RUNTIME_SERVICES)) {
+               pr_err("PRM: EFI runtime services unavailable\n");
+               return;
+       }
+
        status = acpi_install_address_space_handler(ACPI_ROOT_OBJECT,
                                                    ACPI_ADR_SPACE_PLATFORM_RT,
                                                    &acpi_platformrt_space_handler,
index f27914aedbd5ad9262c7ca3340e136847b72c090..192d1784e409b49295a322d4068f5637225a1fef 100644 (file)
@@ -432,10 +432,31 @@ static const struct dmi_system_id asus_laptop[] = {
                        DMI_MATCH(DMI_BOARD_NAME, "S5602ZA"),
                },
        },
+       {
+               .ident = "Asus ExpertBook B2402CBA",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+                       DMI_MATCH(DMI_BOARD_NAME, "B2402CBA"),
+               },
+       },
+       {
+               .ident = "Asus ExpertBook B2502",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+                       DMI_MATCH(DMI_BOARD_NAME, "B2502CBA"),
+               },
+       },
        { }
 };
 
-static const struct dmi_system_id lenovo_82ra[] = {
+static const struct dmi_system_id lenovo_laptop[] = {
+       {
+               .ident = "LENOVO IdeaPad Flex 5 14ALC7",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "82R9"),
+               },
+       },
        {
                .ident = "LENOVO IdeaPad Flex 5 16ALC7",
                .matches = {
@@ -446,6 +467,17 @@ static const struct dmi_system_id lenovo_82ra[] = {
        { }
 };
 
+static const struct dmi_system_id schenker_gm_rg[] = {
+       {
+               .ident = "XMG CORE 15 (M22)",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "SchenkerTechnologiesGmbH"),
+                       DMI_MATCH(DMI_BOARD_NAME, "GMxRGxx"),
+               },
+       },
+       { }
+};
+
 struct irq_override_cmp {
        const struct dmi_system_id *system;
        unsigned char irq;
@@ -458,8 +490,9 @@ struct irq_override_cmp {
 static const struct irq_override_cmp override_table[] = {
        { medion_laptop, 1, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, false },
        { asus_laptop, 1, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, false },
-       { lenovo_82ra, 6, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, true },
-       { lenovo_82ra, 10, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, true },
+       { lenovo_laptop, 6, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, true },
+       { lenovo_laptop, 10, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, true },
+       { schenker_gm_rg, 1, ACPI_EDGE_SENSITIVE, ACPI_ACTIVE_LOW, 1, true },
 };
 
 static bool acpi_dev_irq_override(u32 gsi, u8 triggering, u8 polarity,
index 274344434282ee8e2cc60d91130729270550f536..0c6f06abe3f47f2b77879b0fd051ffb5a043be58 100644 (file)
@@ -1370,9 +1370,12 @@ static void acpi_set_pnp_ids(acpi_handle handle, struct acpi_device_pnp *pnp,
                 * Some devices don't reliably have _HIDs & _CIDs, so add
                 * synthetic HIDs to make sure drivers can find them.
                 */
-               if (acpi_is_video_device(handle))
+               if (acpi_is_video_device(handle)) {
                        acpi_add_id(pnp, ACPI_VIDEO_HID);
-               else if (acpi_bay_match(handle))
+                       pnp->type.backlight = 1;
+                       break;
+               }
+               if (acpi_bay_match(handle))
                        acpi_add_id(pnp, ACPI_BAY_HID);
                else if (acpi_dock_match(handle))
                        acpi_add_id(pnp, ACPI_DOCK_HID);
index a934bbc9dd37c63fc960fbdf5d9de1361fc85118..65cec7bb6d9615dace23df4c121d0fc5721e4e8e 100644 (file)
@@ -34,6 +34,7 @@
 #include <linux/module.h>
 #include <linux/pci.h>
 #include <linux/platform_data/x86/nvidia-wmi-ec-backlight.h>
+#include <linux/pnp.h>
 #include <linux/types.h>
 #include <linux/workqueue.h>
 #include <acpi/video.h>
@@ -49,6 +50,10 @@ static void acpi_video_parse_cmdline(void)
                acpi_backlight_cmdline = acpi_backlight_video;
        if (!strcmp("native", acpi_video_backlight_string))
                acpi_backlight_cmdline = acpi_backlight_native;
+       if (!strcmp("nvidia_wmi_ec", acpi_video_backlight_string))
+               acpi_backlight_cmdline = acpi_backlight_nvidia_wmi_ec;
+       if (!strcmp("apple_gmux", acpi_video_backlight_string))
+               acpi_backlight_cmdline = acpi_backlight_apple_gmux;
        if (!strcmp("none", acpi_video_backlight_string))
                acpi_backlight_cmdline = acpi_backlight_none;
 }
@@ -105,6 +110,26 @@ static bool nvidia_wmi_ec_supported(void)
 }
 #endif
 
+static bool apple_gmux_backlight_present(void)
+{
+       struct acpi_device *adev;
+       struct device *dev;
+
+       adev = acpi_dev_get_first_match_dev(GMUX_ACPI_HID, NULL, -1);
+       if (!adev)
+               return false;
+
+       dev = acpi_get_first_physical_node(adev);
+       if (!dev)
+               return false;
+
+       /*
+        * drivers/platform/x86/apple-gmux.c only supports old style
+        * Apple GMUX with an IO-resource.
+        */
+       return pnp_get_resource(to_pnp_dev(dev), IORESOURCE_IO, 0) != NULL;
+}
+
 /* Force to use vendor driver when the ACPI device is known to be
  * buggy */
 static int video_detect_force_vendor(const struct dmi_system_id *d)
@@ -490,6 +515,14 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
                DMI_MATCH(DMI_PRODUCT_NAME, "Precision 7510"),
                },
        },
+       {
+        .callback = video_detect_force_native,
+        /* Acer Aspire 4810T */
+        .matches = {
+               DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+               DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 4810T"),
+               },
+       },
        {
         .callback = video_detect_force_native,
         /* Acer Aspire 5738z */
@@ -767,7 +800,7 @@ static enum acpi_backlight_type __acpi_video_get_backlight_type(bool native)
        if (nvidia_wmi_ec_present)
                return acpi_backlight_nvidia_wmi_ec;
 
-       if (apple_gmux_present())
+       if (apple_gmux_backlight_present())
                return acpi_backlight_apple_gmux;
 
        /* Use ACPI video if available, except when native should be preferred. */
index 5350c73564b601fffa1f31ee566ae67a50d74de0..c7afce465a0710d0f57bfc37506857b6a8345830 100644 (file)
@@ -28,10 +28,6 @@ static bool sleep_no_lps0 __read_mostly;
 module_param(sleep_no_lps0, bool, 0644);
 MODULE_PARM_DESC(sleep_no_lps0, "Do not use the special LPS0 device interface");
 
-static bool prefer_microsoft_dsm_guid __read_mostly;
-module_param(prefer_microsoft_dsm_guid, bool, 0644);
-MODULE_PARM_DESC(prefer_microsoft_dsm_guid, "Prefer using Microsoft GUID in LPS0 device _DSM evaluation");
-
 static const struct acpi_device_id lps0_device_ids[] = {
        {"PNP0D80", },
        {"", },
@@ -369,27 +365,15 @@ out:
 }
 
 struct amd_lps0_hid_device_data {
-       const unsigned int rev_id;
        const bool check_off_by_one;
-       const bool prefer_amd_guid;
 };
 
 static const struct amd_lps0_hid_device_data amd_picasso = {
-       .rev_id = 0,
        .check_off_by_one = true,
-       .prefer_amd_guid = false,
 };
 
 static const struct amd_lps0_hid_device_data amd_cezanne = {
-       .rev_id = 0,
-       .check_off_by_one = false,
-       .prefer_amd_guid = false,
-};
-
-static const struct amd_lps0_hid_device_data amd_rembrandt = {
-       .rev_id = 2,
        .check_off_by_one = false,
-       .prefer_amd_guid = true,
 };
 
 static const struct acpi_device_id amd_hid_ids[] = {
@@ -397,69 +381,27 @@ static const struct acpi_device_id amd_hid_ids[] = {
        {"AMD0005",     (kernel_ulong_t)&amd_picasso,   },
        {"AMDI0005",    (kernel_ulong_t)&amd_picasso,   },
        {"AMDI0006",    (kernel_ulong_t)&amd_cezanne,   },
-       {"AMDI0007",    (kernel_ulong_t)&amd_rembrandt, },
        {}
 };
 
-static int lps0_prefer_microsoft(const struct dmi_system_id *id)
+static int lps0_prefer_amd(const struct dmi_system_id *id)
 {
-       pr_debug("Preferring Microsoft GUID.\n");
-       prefer_microsoft_dsm_guid = true;
+       pr_debug("Using AMD GUID w/ _REV 2.\n");
+       rev_id = 2;
        return 0;
 }
-
 static const struct dmi_system_id s2idle_dmi_table[] __initconst = {
        {
                /*
-                * ASUS TUF Gaming A17 FA707RE
-                * https://bugzilla.kernel.org/show_bug.cgi?id=216101
-                */
-               .callback = lps0_prefer_microsoft,
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "ASUS TUF Gaming A17"),
-               },
-       },
-       {
-               /* ASUS ROG Zephyrus G14 (2022) */
-               .callback = lps0_prefer_microsoft,
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "ROG Zephyrus G14 GA402"),
-               },
-       },
-       {
-               /*
-                * Lenovo Yoga Slim 7 Pro X 14ARH7
-                * https://bugzilla.kernel.org/show_bug.cgi?id=216473 : 82V2
-                * https://bugzilla.kernel.org/show_bug.cgi?id=216438 : 82TL
-                */
-               .callback = lps0_prefer_microsoft,
-               .matches = {
-                       DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "82"),
-               },
-       },
-       {
-               /*
-                * ASUSTeK COMPUTER INC. ROG Flow X13 GV301RE_GV301RE
-                * https://gitlab.freedesktop.org/drm/amd/-/issues/2148
+                * AMD Rembrandt based HP EliteBook 835/845/865 G9
+                * Contains specialized AML in AMD/_REV 2 path to avoid
+                * triggering a bug in Qualcomm WLAN firmware. This may be
+                * removed in the future if that firmware is fixed.
                 */
-               .callback = lps0_prefer_microsoft,
+               .callback = lps0_prefer_amd,
                .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "ROG Flow X13 GV301"),
-               },
-       },
-       {
-               /*
-                * ASUSTeK COMPUTER INC. ROG Flow X16 GV601RW_GV601RW
-                * https://gitlab.freedesktop.org/drm/amd/-/issues/2148
-                */
-               .callback = lps0_prefer_microsoft,
-               .matches = {
-                       DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
-                       DMI_MATCH(DMI_PRODUCT_NAME, "ROG Flow X16 GV601"),
+                       DMI_MATCH(DMI_BOARD_VENDOR, "HP"),
+                       DMI_MATCH(DMI_BOARD_NAME, "8990"),
                },
        },
        {}
@@ -484,16 +426,14 @@ static int lps0_device_attach(struct acpi_device *adev,
                if (dev_id->id[0])
                        data = (const struct amd_lps0_hid_device_data *) dev_id->driver_data;
                else
-                       data = &amd_rembrandt;
-               rev_id = data->rev_id;
+                       data = &amd_cezanne;
                lps0_dsm_func_mask = validate_dsm(adev->handle,
                                        ACPI_LPS0_DSM_UUID_AMD, rev_id, &lps0_dsm_guid);
                if (lps0_dsm_func_mask > 0x3 && data->check_off_by_one) {
                        lps0_dsm_func_mask = (lps0_dsm_func_mask << 1) | 0x1;
                        acpi_handle_debug(adev->handle, "_DSM UUID %s: Adjusted function mask: 0x%x\n",
                                          ACPI_LPS0_DSM_UUID_AMD, lps0_dsm_func_mask);
-               } else if (lps0_dsm_func_mask_microsoft > 0 && data->prefer_amd_guid &&
-                               !prefer_microsoft_dsm_guid) {
+               } else if (lps0_dsm_func_mask_microsoft > 0 && rev_id) {
                        lps0_dsm_func_mask_microsoft = -EINVAL;
                        acpi_handle_debug(adev->handle, "_DSM Using AMD method\n");
                }
@@ -501,8 +441,7 @@ static int lps0_device_attach(struct acpi_device *adev,
                rev_id = 1;
                lps0_dsm_func_mask = validate_dsm(adev->handle,
                                        ACPI_LPS0_DSM_UUID, rev_id, &lps0_dsm_guid);
-               if (!prefer_microsoft_dsm_guid)
-                       lps0_dsm_func_mask_microsoft = -EINVAL;
+               lps0_dsm_func_mask_microsoft = -EINVAL;
        }
 
        if (lps0_dsm_func_mask < 0 && lps0_dsm_func_mask_microsoft < 0)
index eceaec33af65b479f7c0d942fa4ac7e6e7085efb..9695c4404e26a17698d9b4f807377e630d47018d 100644 (file)
@@ -640,6 +640,7 @@ config PATA_CS5530
 config PATA_CS5535
        tristate "CS5535 PATA support (Experimental)"
        depends on PCI && (X86_32 || (X86_64 && COMPILE_TEST))
+       depends on !UML
        help
          This option enables support for the NatSemi/AMD CS5535
          companion chip used with the Geode processor family.
index 0cfd0ec6229b3208d613342d618754823ebcdeee..14a1c0d14916f928bbbd34f235af7f2a3b3febd2 100644 (file)
@@ -83,6 +83,7 @@ enum board_ids {
 static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
 static void ahci_remove_one(struct pci_dev *dev);
 static void ahci_shutdown_one(struct pci_dev *dev);
+static void ahci_intel_pcs_quirk(struct pci_dev *pdev, struct ahci_host_priv *hpriv);
 static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
                                 unsigned long deadline);
 static int ahci_avn_hardreset(struct ata_link *link, unsigned int *class,
@@ -676,6 +677,25 @@ static void ahci_pci_save_initial_config(struct pci_dev *pdev,
        ahci_save_initial_config(&pdev->dev, hpriv);
 }
 
+static int ahci_pci_reset_controller(struct ata_host *host)
+{
+       struct pci_dev *pdev = to_pci_dev(host->dev);
+       struct ahci_host_priv *hpriv = host->private_data;
+       int rc;
+
+       rc = ahci_reset_controller(host);
+       if (rc)
+               return rc;
+
+       /*
+        * If platform firmware failed to enable ports, try to enable
+        * them here.
+        */
+       ahci_intel_pcs_quirk(pdev, hpriv);
+
+       return 0;
+}
+
 static void ahci_pci_init_controller(struct ata_host *host)
 {
        struct ahci_host_priv *hpriv = host->private_data;
@@ -870,7 +890,7 @@ static int ahci_pci_device_runtime_resume(struct device *dev)
        struct ata_host *host = pci_get_drvdata(pdev);
        int rc;
 
-       rc = ahci_reset_controller(host);
+       rc = ahci_pci_reset_controller(host);
        if (rc)
                return rc;
        ahci_pci_init_controller(host);
@@ -906,7 +926,7 @@ static int ahci_pci_device_resume(struct device *dev)
                ahci_mcp89_apple_enable(pdev);
 
        if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
-               rc = ahci_reset_controller(host);
+               rc = ahci_pci_reset_controller(host);
                if (rc)
                        return rc;
 
@@ -1784,12 +1804,6 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        /* save initial config */
        ahci_pci_save_initial_config(pdev, hpriv);
 
-       /*
-        * If platform firmware failed to enable ports, try to enable
-        * them here.
-        */
-       ahci_intel_pcs_quirk(pdev, hpriv);
-
        /* prepare host */
        if (hpriv->cap & HOST_CAP_NCQ) {
                pi.flags |= ATA_FLAG_NCQ;
@@ -1899,7 +1913,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (rc)
                return rc;
 
-       rc = ahci_reset_controller(host);
+       rc = ahci_pci_reset_controller(host);
        if (rc)
                return rc;
 
index bbb3e499ff4a5db3f7238959668f8642429b6487..083a95791d3b3ca0444bfa9390a6d66c3117e64a 100644 (file)
@@ -997,26 +997,32 @@ struct fwnode_handle *
 fwnode_graph_get_next_endpoint(const struct fwnode_handle *fwnode,
                               struct fwnode_handle *prev)
 {
+       struct fwnode_handle *ep, *port_parent = NULL;
        const struct fwnode_handle *parent;
-       struct fwnode_handle *ep;
 
        /*
         * If this function is in a loop and the previous iteration returned
         * an endpoint from fwnode->secondary, then we need to use the secondary
         * as parent rather than @fwnode.
         */
-       if (prev)
-               parent = fwnode_graph_get_port_parent(prev);
-       else
+       if (prev) {
+               port_parent = fwnode_graph_get_port_parent(prev);
+               parent = port_parent;
+       } else {
                parent = fwnode;
+       }
        if (IS_ERR_OR_NULL(parent))
                return NULL;
 
        ep = fwnode_call_ptr_op(parent, graph_get_next_endpoint, prev);
        if (ep)
-               return ep;
+               goto out_put_port_parent;
+
+       ep = fwnode_graph_get_next_endpoint(parent->secondary, NULL);
 
-       return fwnode_graph_get_next_endpoint(parent->secondary, NULL);
+out_put_port_parent:
+       fwnode_handle_put(port_parent);
+       return ep;
 }
 EXPORT_SYMBOL_GPL(fwnode_graph_get_next_endpoint);
 
index 4d1976ca507271487235107b17a74226af6164b8..929410d0dd6fe2a726eff7094b7b63ff5cbd516c 100644 (file)
@@ -145,7 +145,7 @@ static int __init test_async_probe_init(void)
        calltime = ktime_get();
        for_each_online_cpu(cpu) {
                nid = cpu_to_node(cpu);
-               pdev = &sync_dev[sync_id];
+               pdev = &async_dev[async_id];
 
                *pdev = test_platform_device_register_node("test_async_driver",
                                                           async_id,
index a2184b42849361791f4d7d027dc3c46bf9a2e569..a41145d52de9426b0b6c9ac0ba97c0f7e4c79b5d 100644 (file)
@@ -285,6 +285,49 @@ config BLK_DEV_RAM_SIZE
          The default value is 4096 kilobytes. Only change this if you know
          what you are doing.
 
+config CDROM_PKTCDVD
+       tristate "Packet writing on CD/DVD media (DEPRECATED)"
+       depends on !UML
+       depends on SCSI
+       select CDROM
+       help
+         Note: This driver is deprecated and will be removed from the
+         kernel in the near future!
+
+         If you have a CDROM/DVD drive that supports packet writing, say
+         Y to include support. It should work with any MMC/Mt Fuji
+         compliant ATAPI or SCSI drive, which is just about any newer
+         DVD/CD writer.
+
+         Currently only writing to CD-RW, DVD-RW, DVD+RW and DVDRAM discs
+         is possible.
+         DVD-RW disks must be in restricted overwrite mode.
+
+         See the file <file:Documentation/cdrom/packet-writing.rst>
+         for further information on the use of this driver.
+
+         To compile this driver as a module, choose M here: the
+         module will be called pktcdvd.
+
+config CDROM_PKTCDVD_BUFFERS
+       int "Free buffers for data gathering"
+       depends on CDROM_PKTCDVD
+       default "8"
+       help
+         This controls the maximum number of active concurrent packets. More
+         concurrent packets can increase write performance, but also require
+         more memory. Each concurrent packet will require approximately 64Kb
+         of non-swappable kernel memory, memory which will be allocated when
+         a disc is opened for writing.
+
+config CDROM_PKTCDVD_WCACHE
+       bool "Enable write caching"
+       depends on CDROM_PKTCDVD
+       help
+         If enabled, write caching will be set for the CD-R/W device. For now
+         this option is dangerous unless the CD-RW media is known good, as we
+         don't do deferred write error handling yet.
+
 config ATA_OVER_ETH
        tristate "ATA over Ethernet support"
        depends on NET
index 962ee65d8ca303a608a1e780328dda47bbe4e293..101612cba303a39336cd290035c854ab3df0ff72 100644 (file)
@@ -20,6 +20,7 @@ obj-$(CONFIG_AMIGA_Z2RAM)     += z2ram.o
 obj-$(CONFIG_N64CART)          += n64cart.o
 obj-$(CONFIG_BLK_DEV_RAM)      += brd.o
 obj-$(CONFIG_BLK_DEV_LOOP)     += loop.o
+obj-$(CONFIG_CDROM_PKTCDVD)    += pktcdvd.o
 obj-$(CONFIG_SUNVDC)           += sunvdc.o
 
 obj-$(CONFIG_BLK_DEV_NBD)      += nbd.o
index eb14ec8ec04cfcaf46011809fae0c8fb9f6e8428..e36216d50753c83843e3222632872ae76c685ccd 100644 (file)
@@ -1607,6 +1607,8 @@ void drbd_submit_bio(struct bio *bio)
        struct drbd_device *device = bio->bi_bdev->bd_disk->private_data;
 
        bio = bio_split_to_limits(bio);
+       if (!bio)
+               return;
 
        /*
         * what we "blindly" assume:
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
new file mode 100644 (file)
index 0000000..2f1a925
--- /dev/null
@@ -0,0 +1,2946 @@
+/*
+ * Copyright (C) 2000 Jens Axboe <axboe@suse.de>
+ * Copyright (C) 2001-2004 Peter Osterlund <petero2@telia.com>
+ * Copyright (C) 2006 Thomas Maier <balagi@justmail.de>
+ *
+ * May be copied or modified under the terms of the GNU General Public
+ * License.  See linux/COPYING for more information.
+ *
+ * Packet writing layer for ATAPI and SCSI CD-RW, DVD+RW, DVD-RW and
+ * DVD-RAM devices.
+ *
+ * Theory of operation:
+ *
+ * At the lowest level, there is the standard driver for the CD/DVD device,
+ * such as drivers/scsi/sr.c. This driver can handle read and write requests,
+ * but it doesn't know anything about the special restrictions that apply to
+ * packet writing. One restriction is that write requests must be aligned to
+ * packet boundaries on the physical media, and the size of a write request
+ * must be equal to the packet size. Another restriction is that a
+ * GPCMD_FLUSH_CACHE command has to be issued to the drive before a read
+ * command, if the previous command was a write.
+ *
+ * The purpose of the packet writing driver is to hide these restrictions from
+ * higher layers, such as file systems, and present a block device that can be
+ * randomly read and written using 2kB-sized blocks.
+ *
+ * The lowest layer in the packet writing driver is the packet I/O scheduler.
+ * Its data is defined by the struct packet_iosched and includes two bio
+ * queues with pending read and write requests. These queues are processed
+ * by the pkt_iosched_process_queue() function. The write requests in this
+ * queue are already properly aligned and sized. This layer is responsible for
+ * issuing the flush cache commands and scheduling the I/O in a good order.
+ *
+ * The next layer transforms unaligned write requests to aligned writes. This
+ * transformation requires reading missing pieces of data from the underlying
+ * block device, assembling the pieces to full packets and queuing them to the
+ * packet I/O scheduler.
+ *
+ * At the top layer there is a custom ->submit_bio function that forwards
+ * read requests directly to the iosched queue and puts write requests in the
+ * unaligned write queue. A kernel thread performs the necessary read
+ * gathering to convert the unaligned writes to aligned writes and then feeds
+ * them to the packet I/O scheduler.
+ *
+ *************************************************************************/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/pktcdvd.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/compat.h>
+#include <linux/kthread.h>
+#include <linux/errno.h>
+#include <linux/spinlock.h>
+#include <linux/file.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/miscdevice.h>
+#include <linux/freezer.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/backing-dev.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_ioctl.h>
+#include <scsi/scsi.h>
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/nospec.h>
+#include <linux/uaccess.h>
+
+#define DRIVER_NAME    "pktcdvd"
+
+#define pkt_err(pd, fmt, ...)                                          \
+       pr_err("%s: " fmt, pd->name, ##__VA_ARGS__)
+#define pkt_notice(pd, fmt, ...)                                       \
+       pr_notice("%s: " fmt, pd->name, ##__VA_ARGS__)
+#define pkt_info(pd, fmt, ...)                                         \
+       pr_info("%s: " fmt, pd->name, ##__VA_ARGS__)
+
+#define pkt_dbg(level, pd, fmt, ...)                                   \
+do {                                                                   \
+       if (level == 2 && PACKET_DEBUG >= 2)                            \
+               pr_notice("%s: %s():" fmt,                              \
+                         pd->name, __func__, ##__VA_ARGS__);           \
+       else if (level == 1 && PACKET_DEBUG >= 1)                       \
+               pr_notice("%s: " fmt, pd->name, ##__VA_ARGS__);         \
+} while (0)
+
+#define MAX_SPEED 0xffff
+
+static DEFINE_MUTEX(pktcdvd_mutex);
+static struct pktcdvd_device *pkt_devs[MAX_WRITERS];
+static struct proc_dir_entry *pkt_proc;
+static int pktdev_major;
+static int write_congestion_on  = PKT_WRITE_CONGESTION_ON;
+static int write_congestion_off = PKT_WRITE_CONGESTION_OFF;
+static struct mutex ctl_mutex; /* Serialize open/close/setup/teardown */
+static mempool_t psd_pool;
+static struct bio_set pkt_bio_set;
+
+static struct class    *class_pktcdvd = NULL;    /* /sys/class/pktcdvd */
+static struct dentry   *pkt_debugfs_root = NULL; /* /sys/kernel/debug/pktcdvd */
+
+/* forward declaration */
+static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev);
+static int pkt_remove_dev(dev_t pkt_dev);
+static int pkt_seq_show(struct seq_file *m, void *p);
+
+static sector_t get_zone(sector_t sector, struct pktcdvd_device *pd)
+{
+       return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1);
+}
+
+/**********************************************************
+ * sysfs interface for pktcdvd
+ * by (C) 2006  Thomas Maier <balagi@justmail.de>
+  /sys/class/pktcdvd/pktcdvd[0-7]/
+                     stat/reset
+                     stat/packets_started
+                     stat/packets_finished
+                     stat/kb_written
+                     stat/kb_read
+                     stat/kb_read_gather
+                     write_queue/size
+                     write_queue/congestion_off
+                     write_queue/congestion_on
+ **********************************************************/
+
+static ssize_t packets_started_show(struct device *dev,
+                                   struct device_attribute *attr, char *buf)
+{
+       struct pktcdvd_device *pd = dev_get_drvdata(dev);
+
+       return sysfs_emit(buf, "%lu\n", pd->stats.pkt_started);
+}
+static DEVICE_ATTR_RO(packets_started);
+
+static ssize_t packets_finished_show(struct device *dev,
+                                    struct device_attribute *attr, char *buf)
+{
+       struct pktcdvd_device *pd = dev_get_drvdata(dev);
+
+       return sysfs_emit(buf, "%lu\n", pd->stats.pkt_ended);
+}
+static DEVICE_ATTR_RO(packets_finished);
+
+static ssize_t kb_written_show(struct device *dev,
+                              struct device_attribute *attr, char *buf)
+{
+       struct pktcdvd_device *pd = dev_get_drvdata(dev);
+
+       return sysfs_emit(buf, "%lu\n", pd->stats.secs_w >> 1);
+}
+static DEVICE_ATTR_RO(kb_written);
+
+static ssize_t kb_read_show(struct device *dev,
+                           struct device_attribute *attr, char *buf)
+{
+       struct pktcdvd_device *pd = dev_get_drvdata(dev);
+
+       return sysfs_emit(buf, "%lu\n", pd->stats.secs_r >> 1);
+}
+static DEVICE_ATTR_RO(kb_read);
+
+static ssize_t kb_read_gather_show(struct device *dev,
+                                  struct device_attribute *attr, char *buf)
+{
+       struct pktcdvd_device *pd = dev_get_drvdata(dev);
+
+       return sysfs_emit(buf, "%lu\n", pd->stats.secs_rg >> 1);
+}
+static DEVICE_ATTR_RO(kb_read_gather);
+
+static ssize_t reset_store(struct device *dev, struct device_attribute *attr,
+                          const char *buf, size_t len)
+{
+       struct pktcdvd_device *pd = dev_get_drvdata(dev);
+
+       if (len > 0) {
+               pd->stats.pkt_started = 0;
+               pd->stats.pkt_ended = 0;
+               pd->stats.secs_w = 0;
+               pd->stats.secs_rg = 0;
+               pd->stats.secs_r = 0;
+       }
+       return len;
+}
+static DEVICE_ATTR_WO(reset);
+
+static struct attribute *pkt_stat_attrs[] = {
+       &dev_attr_packets_finished.attr,
+       &dev_attr_packets_started.attr,
+       &dev_attr_kb_read.attr,
+       &dev_attr_kb_written.attr,
+       &dev_attr_kb_read_gather.attr,
+       &dev_attr_reset.attr,
+       NULL,
+};
+
+static const struct attribute_group pkt_stat_group = {
+       .name = "stat",
+       .attrs = pkt_stat_attrs,
+};
+
+static ssize_t size_show(struct device *dev,
+                        struct device_attribute *attr, char *buf)
+{
+       struct pktcdvd_device *pd = dev_get_drvdata(dev);
+       int n;
+
+       spin_lock(&pd->lock);
+       n = sysfs_emit(buf, "%d\n", pd->bio_queue_size);
+       spin_unlock(&pd->lock);
+       return n;
+}
+static DEVICE_ATTR_RO(size);
+
+static void init_write_congestion_marks(int* lo, int* hi)
+{
+       if (*hi > 0) {
+               *hi = max(*hi, 500);
+               *hi = min(*hi, 1000000);
+               if (*lo <= 0)
+                       *lo = *hi - 100;
+               else {
+                       *lo = min(*lo, *hi - 100);
+                       *lo = max(*lo, 100);
+               }
+       } else {
+               *hi = -1;
+               *lo = -1;
+       }
+}
+
+static ssize_t congestion_off_show(struct device *dev,
+                                  struct device_attribute *attr, char *buf)
+{
+       struct pktcdvd_device *pd = dev_get_drvdata(dev);
+       int n;
+
+       spin_lock(&pd->lock);
+       n = sysfs_emit(buf, "%d\n", pd->write_congestion_off);
+       spin_unlock(&pd->lock);
+       return n;
+}
+
+static ssize_t congestion_off_store(struct device *dev,
+                                   struct device_attribute *attr,
+                                   const char *buf, size_t len)
+{
+       struct pktcdvd_device *pd = dev_get_drvdata(dev);
+       int val;
+
+       if (sscanf(buf, "%d", &val) == 1) {
+               spin_lock(&pd->lock);
+               pd->write_congestion_off = val;
+               init_write_congestion_marks(&pd->write_congestion_off,
+                                       &pd->write_congestion_on);
+               spin_unlock(&pd->lock);
+       }
+       return len;
+}
+static DEVICE_ATTR_RW(congestion_off);
+
+static ssize_t congestion_on_show(struct device *dev,
+                                 struct device_attribute *attr, char *buf)
+{
+       struct pktcdvd_device *pd = dev_get_drvdata(dev);
+       int n;
+
+       spin_lock(&pd->lock);
+       n = sysfs_emit(buf, "%d\n", pd->write_congestion_on);
+       spin_unlock(&pd->lock);
+       return n;
+}
+
+static ssize_t congestion_on_store(struct device *dev,
+                                  struct device_attribute *attr,
+                                  const char *buf, size_t len)
+{
+       struct pktcdvd_device *pd = dev_get_drvdata(dev);
+       int val;
+
+       if (sscanf(buf, "%d", &val) == 1) {
+               spin_lock(&pd->lock);
+               pd->write_congestion_on = val;
+               init_write_congestion_marks(&pd->write_congestion_off,
+                                       &pd->write_congestion_on);
+               spin_unlock(&pd->lock);
+       }
+       return len;
+}
+static DEVICE_ATTR_RW(congestion_on);
+
+static struct attribute *pkt_wq_attrs[] = {
+       &dev_attr_congestion_on.attr,
+       &dev_attr_congestion_off.attr,
+       &dev_attr_size.attr,
+       NULL,
+};
+
+static const struct attribute_group pkt_wq_group = {
+       .name = "write_queue",
+       .attrs = pkt_wq_attrs,
+};
+
+static const struct attribute_group *pkt_groups[] = {
+       &pkt_stat_group,
+       &pkt_wq_group,
+       NULL,
+};
+
+static void pkt_sysfs_dev_new(struct pktcdvd_device *pd)
+{
+       if (class_pktcdvd) {
+               pd->dev = device_create_with_groups(class_pktcdvd, NULL,
+                                                   MKDEV(0, 0), pd, pkt_groups,
+                                                   "%s", pd->name);
+               if (IS_ERR(pd->dev))
+                       pd->dev = NULL;
+       }
+}
+
+static void pkt_sysfs_dev_remove(struct pktcdvd_device *pd)
+{
+       if (class_pktcdvd)
+               device_unregister(pd->dev);
+}
+
+
+/********************************************************************
+  /sys/class/pktcdvd/
+                     add            map block device
+                     remove         unmap packet dev
+                     device_map     show mappings
+ *******************************************************************/
+
+static void class_pktcdvd_release(struct class *cls)
+{
+       kfree(cls);
+}
+
+static ssize_t device_map_show(struct class *c, struct class_attribute *attr,
+                              char *data)
+{
+       int n = 0;
+       int idx;
+       mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
+       for (idx = 0; idx < MAX_WRITERS; idx++) {
+               struct pktcdvd_device *pd = pkt_devs[idx];
+               if (!pd)
+                       continue;
+               n += sprintf(data+n, "%s %u:%u %u:%u\n",
+                       pd->name,
+                       MAJOR(pd->pkt_dev), MINOR(pd->pkt_dev),
+                       MAJOR(pd->bdev->bd_dev),
+                       MINOR(pd->bdev->bd_dev));
+       }
+       mutex_unlock(&ctl_mutex);
+       return n;
+}
+static CLASS_ATTR_RO(device_map);
+
+static ssize_t add_store(struct class *c, struct class_attribute *attr,
+                        const char *buf, size_t count)
+{
+       unsigned int major, minor;
+
+       if (sscanf(buf, "%u:%u", &major, &minor) == 2) {
+               /* pkt_setup_dev() expects caller to hold reference to self */
+               if (!try_module_get(THIS_MODULE))
+                       return -ENODEV;
+
+               pkt_setup_dev(MKDEV(major, minor), NULL);
+
+               module_put(THIS_MODULE);
+
+               return count;
+       }
+
+       return -EINVAL;
+}
+static CLASS_ATTR_WO(add);
+
+static ssize_t remove_store(struct class *c, struct class_attribute *attr,
+                           const char *buf, size_t count)
+{
+       unsigned int major, minor;
+       if (sscanf(buf, "%u:%u", &major, &minor) == 2) {
+               pkt_remove_dev(MKDEV(major, minor));
+               return count;
+       }
+       return -EINVAL;
+}
+static CLASS_ATTR_WO(remove);
+
+static struct attribute *class_pktcdvd_attrs[] = {
+       &class_attr_add.attr,
+       &class_attr_remove.attr,
+       &class_attr_device_map.attr,
+       NULL,
+};
+ATTRIBUTE_GROUPS(class_pktcdvd);
+
+static int pkt_sysfs_init(void)
+{
+       int ret = 0;
+
+       /*
+        * create control files in sysfs
+        * /sys/class/pktcdvd/...
+        */
+       class_pktcdvd = kzalloc(sizeof(*class_pktcdvd), GFP_KERNEL);
+       if (!class_pktcdvd)
+               return -ENOMEM;
+       class_pktcdvd->name = DRIVER_NAME;
+       class_pktcdvd->owner = THIS_MODULE;
+       class_pktcdvd->class_release = class_pktcdvd_release;
+       class_pktcdvd->class_groups = class_pktcdvd_groups;
+       ret = class_register(class_pktcdvd);
+       if (ret) {
+               kfree(class_pktcdvd);
+               class_pktcdvd = NULL;
+               pr_err("failed to create class pktcdvd\n");
+               return ret;
+       }
+       return 0;
+}
+
+static void pkt_sysfs_cleanup(void)
+{
+       if (class_pktcdvd)
+               class_destroy(class_pktcdvd);
+       class_pktcdvd = NULL;
+}
+
+/********************************************************************
+  entries in debugfs
+
+  /sys/kernel/debug/pktcdvd[0-7]/
+                       info
+
+ *******************************************************************/
+
+static int pkt_debugfs_seq_show(struct seq_file *m, void *p)
+{
+       return pkt_seq_show(m, p);
+}
+
+static int pkt_debugfs_fops_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, pkt_debugfs_seq_show, inode->i_private);
+}
+
+static const struct file_operations debug_fops = {
+       .open           = pkt_debugfs_fops_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = single_release,
+       .owner          = THIS_MODULE,
+};
+
+static void pkt_debugfs_dev_new(struct pktcdvd_device *pd)
+{
+       if (!pkt_debugfs_root)
+               return;
+       pd->dfs_d_root = debugfs_create_dir(pd->name, pkt_debugfs_root);
+       if (!pd->dfs_d_root)
+               return;
+
+       pd->dfs_f_info = debugfs_create_file("info", 0444,
+                                            pd->dfs_d_root, pd, &debug_fops);
+}
+
+static void pkt_debugfs_dev_remove(struct pktcdvd_device *pd)
+{
+       if (!pkt_debugfs_root)
+               return;
+       debugfs_remove(pd->dfs_f_info);
+       debugfs_remove(pd->dfs_d_root);
+       pd->dfs_f_info = NULL;
+       pd->dfs_d_root = NULL;
+}
+
+static void pkt_debugfs_init(void)
+{
+       pkt_debugfs_root = debugfs_create_dir(DRIVER_NAME, NULL);
+}
+
+static void pkt_debugfs_cleanup(void)
+{
+       debugfs_remove(pkt_debugfs_root);
+       pkt_debugfs_root = NULL;
+}
+
+/* ----------------------------------------------------------*/
+
+
+static void pkt_bio_finished(struct pktcdvd_device *pd)
+{
+       BUG_ON(atomic_read(&pd->cdrw.pending_bios) <= 0);
+       if (atomic_dec_and_test(&pd->cdrw.pending_bios)) {
+               pkt_dbg(2, pd, "queue empty\n");
+               atomic_set(&pd->iosched.attention, 1);
+               wake_up(&pd->wqueue);
+       }
+}
+
+/*
+ * Allocate a packet_data struct
+ */
+static struct packet_data *pkt_alloc_packet_data(int frames)
+{
+       int i;
+       struct packet_data *pkt;
+
+       pkt = kzalloc(sizeof(struct packet_data), GFP_KERNEL);
+       if (!pkt)
+               goto no_pkt;
+
+       pkt->frames = frames;
+       pkt->w_bio = bio_kmalloc(frames, GFP_KERNEL);
+       if (!pkt->w_bio)
+               goto no_bio;
+
+       for (i = 0; i < frames / FRAMES_PER_PAGE; i++) {
+               pkt->pages[i] = alloc_page(GFP_KERNEL|__GFP_ZERO);
+               if (!pkt->pages[i])
+                       goto no_page;
+       }
+
+       spin_lock_init(&pkt->lock);
+       bio_list_init(&pkt->orig_bios);
+
+       for (i = 0; i < frames; i++) {
+               pkt->r_bios[i] = bio_kmalloc(1, GFP_KERNEL);
+               if (!pkt->r_bios[i])
+                       goto no_rd_bio;
+       }
+
+       return pkt;
+
+no_rd_bio:
+       for (i = 0; i < frames; i++)
+               kfree(pkt->r_bios[i]);
+no_page:
+       for (i = 0; i < frames / FRAMES_PER_PAGE; i++)
+               if (pkt->pages[i])
+                       __free_page(pkt->pages[i]);
+       kfree(pkt->w_bio);
+no_bio:
+       kfree(pkt);
+no_pkt:
+       return NULL;
+}
+
+/*
+ * Free a packet_data struct
+ */
+static void pkt_free_packet_data(struct packet_data *pkt)
+{
+       int i;
+
+       for (i = 0; i < pkt->frames; i++)
+               kfree(pkt->r_bios[i]);
+       for (i = 0; i < pkt->frames / FRAMES_PER_PAGE; i++)
+               __free_page(pkt->pages[i]);
+       kfree(pkt->w_bio);
+       kfree(pkt);
+}
+
+static void pkt_shrink_pktlist(struct pktcdvd_device *pd)
+{
+       struct packet_data *pkt, *next;
+
+       BUG_ON(!list_empty(&pd->cdrw.pkt_active_list));
+
+       list_for_each_entry_safe(pkt, next, &pd->cdrw.pkt_free_list, list) {
+               pkt_free_packet_data(pkt);
+       }
+       INIT_LIST_HEAD(&pd->cdrw.pkt_free_list);
+}
+
+static int pkt_grow_pktlist(struct pktcdvd_device *pd, int nr_packets)
+{
+       struct packet_data *pkt;
+
+       BUG_ON(!list_empty(&pd->cdrw.pkt_free_list));
+
+       while (nr_packets > 0) {
+               pkt = pkt_alloc_packet_data(pd->settings.size >> 2);
+               if (!pkt) {
+                       pkt_shrink_pktlist(pd);
+                       return 0;
+               }
+               pkt->id = nr_packets;
+               pkt->pd = pd;
+               list_add(&pkt->list, &pd->cdrw.pkt_free_list);
+               nr_packets--;
+       }
+       return 1;
+}
+
+static inline struct pkt_rb_node *pkt_rbtree_next(struct pkt_rb_node *node)
+{
+       struct rb_node *n = rb_next(&node->rb_node);
+       if (!n)
+               return NULL;
+       return rb_entry(n, struct pkt_rb_node, rb_node);
+}
+
+static void pkt_rbtree_erase(struct pktcdvd_device *pd, struct pkt_rb_node *node)
+{
+       rb_erase(&node->rb_node, &pd->bio_queue);
+       mempool_free(node, &pd->rb_pool);
+       pd->bio_queue_size--;
+       BUG_ON(pd->bio_queue_size < 0);
+}
+
+/*
+ * Find the first node in the pd->bio_queue rb tree with a starting sector >= s.
+ */
+static struct pkt_rb_node *pkt_rbtree_find(struct pktcdvd_device *pd, sector_t s)
+{
+       struct rb_node *n = pd->bio_queue.rb_node;
+       struct rb_node *next;
+       struct pkt_rb_node *tmp;
+
+       if (!n) {
+               BUG_ON(pd->bio_queue_size > 0);
+               return NULL;
+       }
+
+       for (;;) {
+               tmp = rb_entry(n, struct pkt_rb_node, rb_node);
+               if (s <= tmp->bio->bi_iter.bi_sector)
+                       next = n->rb_left;
+               else
+                       next = n->rb_right;
+               if (!next)
+                       break;
+               n = next;
+       }
+
+       if (s > tmp->bio->bi_iter.bi_sector) {
+               tmp = pkt_rbtree_next(tmp);
+               if (!tmp)
+                       return NULL;
+       }
+       BUG_ON(s > tmp->bio->bi_iter.bi_sector);
+       return tmp;
+}
+
+/*
+ * Insert a node into the pd->bio_queue rb tree.
+ */
+static void pkt_rbtree_insert(struct pktcdvd_device *pd, struct pkt_rb_node *node)
+{
+       struct rb_node **p = &pd->bio_queue.rb_node;
+       struct rb_node *parent = NULL;
+       sector_t s = node->bio->bi_iter.bi_sector;
+       struct pkt_rb_node *tmp;
+
+       while (*p) {
+               parent = *p;
+               tmp = rb_entry(parent, struct pkt_rb_node, rb_node);
+               if (s < tmp->bio->bi_iter.bi_sector)
+                       p = &(*p)->rb_left;
+               else
+                       p = &(*p)->rb_right;
+       }
+       rb_link_node(&node->rb_node, parent, p);
+       rb_insert_color(&node->rb_node, &pd->bio_queue);
+       pd->bio_queue_size++;
+}
+
+/*
+ * Send a packet_command to the underlying block device and
+ * wait for completion.
+ */
+static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *cgc)
+{
+       struct request_queue *q = bdev_get_queue(pd->bdev);
+       struct scsi_cmnd *scmd;
+       struct request *rq;
+       int ret = 0;
+
+       rq = scsi_alloc_request(q, (cgc->data_direction == CGC_DATA_WRITE) ?
+                            REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
+       if (IS_ERR(rq))
+               return PTR_ERR(rq);
+       scmd = blk_mq_rq_to_pdu(rq);
+
+       if (cgc->buflen) {
+               ret = blk_rq_map_kern(q, rq, cgc->buffer, cgc->buflen,
+                                     GFP_NOIO);
+               if (ret)
+                       goto out;
+       }
+
+       scmd->cmd_len = COMMAND_SIZE(cgc->cmd[0]);
+       memcpy(scmd->cmnd, cgc->cmd, CDROM_PACKET_SIZE);
+
+       rq->timeout = 60*HZ;
+       if (cgc->quiet)
+               rq->rq_flags |= RQF_QUIET;
+
+       blk_execute_rq(rq, false);
+       if (scmd->result)
+               ret = -EIO;
+out:
+       blk_mq_free_request(rq);
+       return ret;
+}
+
+static const char *sense_key_string(__u8 index)
+{
+       static const char * const info[] = {
+               "No sense", "Recovered error", "Not ready",
+               "Medium error", "Hardware error", "Illegal request",
+               "Unit attention", "Data protect", "Blank check",
+       };
+
+       return index < ARRAY_SIZE(info) ? info[index] : "INVALID";
+}
+
+/*
+ * A generic sense dump / resolve mechanism should be implemented across
+ * all ATAPI + SCSI devices.
+ */
+/* Log the failed CDB and, when a sense header was supplied, its decoded
+ * key/ASC/ASCQ triple. */
+static void pkt_dump_sense(struct pktcdvd_device *pd,
+                          struct packet_command *cgc)
+{
+       struct scsi_sense_hdr *sshdr = cgc->sshdr;
+
+       /* "%*ph" prints the CDROM_PACKET_SIZE command bytes as hex. */
+       if (sshdr)
+               pkt_err(pd, "%*ph - sense %02x.%02x.%02x (%s)\n",
+                       CDROM_PACKET_SIZE, cgc->cmd,
+                       sshdr->sense_key, sshdr->asc, sshdr->ascq,
+                       sense_key_string(sshdr->sense_key));
+       else
+               pkt_err(pd, "%*ph - no sense\n", CDROM_PACKET_SIZE, cgc->cmd);
+}
+
+/*
+ * flush the drive cache to media
+ */
+static int pkt_flush_cache(struct pktcdvd_device *pd)
+{
+       struct packet_command cgc;
+
+       init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
+       cgc.quiet = 1;
+       cgc.cmd[0] = GPCMD_FLUSH_CACHE;
+
+       /*
+        * Setting the IMMED bit (cgc.cmd[1] = 1 << 1) would let the flush
+        * return immediately and make for a much faster close, but leaving
+        * it clear is safer, so we deliberately do not set it.
+        */
+       return pkt_generic_packet(pd, &cgc);
+}
+
+/*
+ * speed is given as the normal factor, e.g. 4 for 4x
+ */
+/* Issue GPCMD_SET_SPEED; dumps sense data on failure.  Returns 0 or a
+ * negative errno from pkt_generic_packet(). */
+static noinline_for_stack int pkt_set_speed(struct pktcdvd_device *pd,
+                               unsigned write_speed, unsigned read_speed)
+{
+       struct packet_command cgc;
+       struct scsi_sense_hdr sshdr;
+       int ret;
+
+       init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
+       cgc.sshdr = &sshdr;
+       cgc.cmd[0] = GPCMD_SET_SPEED;
+       /* Speeds are 16-bit big-endian fields in the CDB. */
+       cgc.cmd[2] = (read_speed >> 8) & 0xff;
+       cgc.cmd[3] = read_speed & 0xff;
+       cgc.cmd[4] = (write_speed >> 8) & 0xff;
+       cgc.cmd[5] = write_speed & 0xff;
+
+       ret = pkt_generic_packet(pd, &cgc);
+       if (ret)
+               pkt_dump_sense(pd, &cgc);
+
+       return ret;
+}
+
+/*
+ * Hand a bio to the iosched thread for processing by the low-level CD
+ * device.  Must be called from process context.
+ */
+static void pkt_queue_bio(struct pktcdvd_device *pd, struct bio *bio)
+{
+       struct bio_list *queue;
+
+       /* Reads and writes are kept on separate scheduling queues. */
+       queue = (bio_data_dir(bio) == READ) ? &pd->iosched.read_queue
+                                           : &pd->iosched.write_queue;
+
+       spin_lock(&pd->iosched.lock);
+       bio_list_add(queue, bio);
+       spin_unlock(&pd->iosched.lock);
+
+       /* Flag the work and wake the kcdrwd thread. */
+       atomic_set(&pd->iosched.attention, 1);
+       wake_up(&pd->wqueue);
+}
+
+/*
+ * Process the queued read/write requests. This function handles special
+ * requirements for CDRW drives:
+ * - A cache flush command must be inserted before a read request if the
+ *   previous request was a write.
+ * - Switching between reading and writing is slow, so don't do it more often
+ *   than necessary.
+ * - Optimize for throughput at the expense of latency. This means that streaming
+ *   writes will never be interrupted by a read, but if the drive has to seek
+ *   before the next write, switch to reading instead if there are any pending
+ *   read requests.
+ * - Set the read speed according to current usage pattern. When only reading
+ *   from the device, it's best to use the highest possible read speed, but
+ *   when switching often between reading and writing, it's better to have the
+ *   same read and write speeds.
+ */
+static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
+{
+
+       /* Cheap early-out: only run when someone queued new work. */
+       if (atomic_read(&pd->iosched.attention) == 0)
+               return;
+       atomic_set(&pd->iosched.attention, 0);
+
+       for (;;) {
+               struct bio *bio;
+               int reads_queued, writes_queued;
+
+               spin_lock(&pd->iosched.lock);
+               reads_queued = !bio_list_empty(&pd->iosched.read_queue);
+               writes_queued = !bio_list_empty(&pd->iosched.write_queue);
+               spin_unlock(&pd->iosched.lock);
+
+               if (!reads_queued && !writes_queued)
+                       break;
+
+               if (pd->iosched.writing) {
+                       int need_write_seek = 1;
+                       spin_lock(&pd->iosched.lock);
+                       bio = bio_list_peek(&pd->iosched.write_queue);
+                       spin_unlock(&pd->iosched.lock);
+                       /* Sequential with the previous write: no seek needed. */
+                       if (bio && (bio->bi_iter.bi_sector ==
+                                   pd->iosched.last_write))
+                               need_write_seek = 0;
+                       if (need_write_seek && reads_queued) {
+                               /* Let in-flight I/O drain before switching. */
+                               if (atomic_read(&pd->cdrw.pending_bios) > 0) {
+                                       pkt_dbg(2, pd, "write, waiting\n");
+                                       break;
+                               }
+                               /* Flush before the mode switch, then read. */
+                               pkt_flush_cache(pd);
+                               pd->iosched.writing = 0;
+                       }
+               } else {
+                       if (!reads_queued && writes_queued) {
+                               if (atomic_read(&pd->cdrw.pending_bios) > 0) {
+                                       pkt_dbg(2, pd, "read, waiting\n");
+                                       break;
+                               }
+                               pd->iosched.writing = 1;
+                       }
+               }
+
+               spin_lock(&pd->iosched.lock);
+               if (pd->iosched.writing)
+                       bio = bio_list_pop(&pd->iosched.write_queue);
+               else
+                       bio = bio_list_pop(&pd->iosched.read_queue);
+               spin_unlock(&pd->iosched.lock);
+
+               if (!bio)
+                       continue;
+
+               /* Track streaming reads (in KiB) to drive speed selection. */
+               if (bio_data_dir(bio) == READ)
+                       pd->iosched.successive_reads +=
+                               bio->bi_iter.bi_size >> 10;
+               else {
+                       pd->iosched.successive_reads = 0;
+                       pd->iosched.last_write = bio_end_sector(bio);
+               }
+               if (pd->iosched.successive_reads >= HI_SPEED_SWITCH) {
+                       /* Pure read workload: crank read speed to maximum. */
+                       if (pd->read_speed == pd->write_speed) {
+                               pd->read_speed = MAX_SPEED;
+                               pkt_set_speed(pd, pd->write_speed, pd->read_speed);
+                       }
+               } else {
+                       /* Mixed workload: match read speed to write speed. */
+                       if (pd->read_speed != pd->write_speed) {
+                               pd->read_speed = pd->write_speed;
+                               pkt_set_speed(pd, pd->write_speed, pd->read_speed);
+                       }
+               }
+
+               atomic_inc(&pd->cdrw.pending_bios);
+               submit_bio_noacct(bio);
+       }
+}
+
+/*
+ * Special care is needed if the underlying block device has a small
+ * max_phys_segments value.
+ */
+/* Decide whether writes must be merged into fewer segments; sets or
+ * clears PACKET_MERGE_SEGS in pd->flags.  Returns 0 on success, -EIO if
+ * the device cannot handle a packet even with merging. */
+static int pkt_set_segment_merging(struct pktcdvd_device *pd, struct request_queue *q)
+{
+       /* settings.size is in 512-byte sectors; << 9 converts to bytes. */
+       if ((pd->settings.size << 9) / CD_FRAMESIZE
+           <= queue_max_segments(q)) {
+               /*
+                * The cdrom device can handle one segment/frame
+                */
+               clear_bit(PACKET_MERGE_SEGS, &pd->flags);
+               return 0;
+       } else if ((pd->settings.size << 9) / PAGE_SIZE
+                  <= queue_max_segments(q)) {
+               /*
+                * We can handle this case at the expense of some extra memory
+                * copies during write operations
+                */
+               set_bit(PACKET_MERGE_SEGS, &pd->flags);
+               return 0;
+       } else {
+               pkt_err(pd, "cdrom max_phys_segments too small\n");
+               return -EIO;
+       }
+}
+
+/* Completion handler for the per-frame read bios issued by
+ * pkt_gather_data().  Counts errors and, once the last outstanding read
+ * finishes, kicks the packet state machine. */
+static void pkt_end_io_read(struct bio *bio)
+{
+       struct packet_data *pkt = bio->bi_private;
+       struct pktcdvd_device *pd = pkt->pd;
+       BUG_ON(!pd);
+
+       pkt_dbg(2, pd, "bio=%p sec0=%llx sec=%llx err=%d\n",
+               bio, (unsigned long long)pkt->sector,
+               (unsigned long long)bio->bi_iter.bi_sector, bio->bi_status);
+
+       if (bio->bi_status)
+               atomic_inc(&pkt->io_errors);
+       bio_uninit(bio);
+       /* Last read done: request a state-machine run and wake kcdrwd. */
+       if (atomic_dec_and_test(&pkt->io_wait)) {
+               atomic_inc(&pkt->run_sm);
+               wake_up(&pd->wqueue);
+       }
+       pkt_bio_finished(pd);
+}
+
+/* Completion handler for the assembled packet-write bio started by
+ * pkt_start_write().  Always wakes the worker so the state machine can
+ * inspect w_bio->bi_status. */
+static void pkt_end_io_packet_write(struct bio *bio)
+{
+       struct packet_data *pkt = bio->bi_private;
+       struct pktcdvd_device *pd = pkt->pd;
+       BUG_ON(!pd);
+
+       pkt_dbg(2, pd, "id=%d, err=%d\n", pkt->id, bio->bi_status);
+
+       pd->stats.pkt_ended++;
+
+       bio_uninit(bio);
+       pkt_bio_finished(pd);
+       atomic_dec(&pkt->io_wait);
+       atomic_inc(&pkt->run_sm);
+       wake_up(&pd->wqueue);
+}
+
+/*
+ * Schedule reads for the holes in a packet
+ */
+/* Determine which frames of @pkt are not covered by the queued write
+ * bios and issue reads for them, so the full packet can be written out.
+ * Reads are skipped entirely when the packet's cache is still valid. */
+static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
+{
+       int frames_read = 0;
+       struct bio *bio;
+       int f;
+       char written[PACKET_MAX_SIZE];
+
+       BUG_ON(bio_list_empty(&pkt->orig_bios));
+
+       atomic_set(&pkt->io_wait, 0);
+       atomic_set(&pkt->io_errors, 0);
+
+       /*
+        * Figure out which frames we need to read before we can write.
+        */
+       memset(written, 0, sizeof(written));
+       spin_lock(&pkt->lock);
+       bio_list_for_each(bio, &pkt->orig_bios) {
+               /* Frame index of this bio relative to the packet start. */
+               int first_frame = (bio->bi_iter.bi_sector - pkt->sector) /
+                       (CD_FRAMESIZE >> 9);
+               int num_frames = bio->bi_iter.bi_size / CD_FRAMESIZE;
+               pd->stats.secs_w += num_frames * (CD_FRAMESIZE >> 9);
+               BUG_ON(first_frame < 0);
+               BUG_ON(first_frame + num_frames > pkt->frames);
+               for (f = first_frame; f < first_frame + num_frames; f++)
+                       written[f] = 1;
+       }
+       spin_unlock(&pkt->lock);
+
+       /* Cached packet data still valid: no reads needed at all. */
+       if (pkt->cache_valid) {
+               pkt_dbg(2, pd, "zone %llx cached\n",
+                       (unsigned long long)pkt->sector);
+               goto out_account;
+       }
+
+       /*
+        * Schedule reads for missing parts of the packet.
+        */
+       for (f = 0; f < pkt->frames; f++) {
+               int p, offset;
+
+               if (written[f])
+                       continue;
+
+               bio = pkt->r_bios[f];
+               bio_init(bio, pd->bdev, bio->bi_inline_vecs, 1, REQ_OP_READ);
+               bio->bi_iter.bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9);
+               bio->bi_end_io = pkt_end_io_read;
+               bio->bi_private = pkt;
+
+               /* Locate the frame within the packet's page array. */
+               p = (f * CD_FRAMESIZE) / PAGE_SIZE;
+               offset = (f * CD_FRAMESIZE) % PAGE_SIZE;
+               pkt_dbg(2, pd, "Adding frame %d, page:%p offs:%d\n",
+                       f, pkt->pages[p], offset);
+               if (!bio_add_page(bio, pkt->pages[p], CD_FRAMESIZE, offset))
+                       BUG();
+
+               atomic_inc(&pkt->io_wait);
+               pkt_queue_bio(pd, bio);
+               frames_read++;
+       }
+
+out_account:
+       pkt_dbg(2, pd, "need %d frames for zone %llx\n",
+               frames_read, (unsigned long long)pkt->sector);
+       pd->stats.pkt_started++;
+       pd->stats.secs_rg += frames_read * (CD_FRAMESIZE >> 9);
+}
+
+/*
+ * Find a packet matching zone, or the least recently used packet if
+ * there is no match.
+ */
+static struct packet_data *pkt_get_packet_data(struct pktcdvd_device *pd, int zone)
+{
+       struct packet_data *pkt;
+
+       /*
+        * Take either the packet already assigned to this zone, or -- if
+        * we reach the last list entry without a match -- the tail of the
+        * free list (the least recently used packet, per
+        * pkt_put_packet_data()'s ordering).
+        */
+       list_for_each_entry(pkt, &pd->cdrw.pkt_free_list, list) {
+               if (pkt->sector == zone || pkt->list.next == &pd->cdrw.pkt_free_list) {
+                       list_del_init(&pkt->list);
+                       /* Reused for a different zone: its cache is stale. */
+                       if (pkt->sector != zone)
+                               pkt->cache_valid = 0;
+                       return pkt;
+               }
+       }
+       /* Callers only invoke this when the free list is non-empty. */
+       BUG();
+       return NULL;
+}
+
+/* Return @pkt to the free list.  Packets with valid cached data go to
+ * the head so they survive longest; invalid ones go to the tail, which
+ * pkt_get_packet_data() treats as least recently used. */
+static void pkt_put_packet_data(struct pktcdvd_device *pd, struct packet_data *pkt)
+{
+       struct list_head *free_list = &pd->cdrw.pkt_free_list;
+
+       if (pkt->cache_valid)
+               list_add(&pkt->list, free_list);
+       else
+               list_add_tail(&pkt->list, free_list);
+}
+
+/*
+ * Transition @pkt to @state.  When built with PACKET_DEBUG > 1 the
+ * transition is also logged.
+ */
+static inline void pkt_set_state(struct packet_data *pkt, enum packet_data_state state)
+{
+#if PACKET_DEBUG > 1
+       static const char *state_name[] = {
+               "IDLE", "WAITING", "READ_WAIT", "WRITE_WAIT", "RECOVERY", "FINISHED"
+       };
+       enum packet_data_state old_state = pkt->state;
+
+       /*
+        * Use the packet's device back-pointer: no 'pd' variable exists in
+        * this scope, so referencing a bare 'pd' failed to compile whenever
+        * PACKET_DEBUG > 1 was enabled.
+        */
+       pkt_dbg(2, pkt->pd, "pkt %2d : s=%6llx %s -> %s\n",
+               pkt->id, (unsigned long long)pkt->sector,
+               state_name[old_state], state_name[state]);
+#endif
+       pkt->state = state;
+}
+
+/*
+ * Scan the work queue to see if we can start a new packet.
+ * returns non-zero if any work was done.
+ */
+static int pkt_handle_queue(struct pktcdvd_device *pd)
+{
+       struct packet_data *pkt, *p;
+       struct bio *bio = NULL;
+       sector_t zone = 0; /* Suppress gcc warning */
+       struct pkt_rb_node *node, *first_node;
+       struct rb_node *n;
+
+       atomic_set(&pd->scan_queue, 0);
+
+       /* No free packet_data: nothing can be started now. */
+       if (list_empty(&pd->cdrw.pkt_free_list)) {
+               pkt_dbg(2, pd, "no pkt\n");
+               return 0;
+       }
+
+       /*
+        * Try to find a zone we are not already working on.
+        */
+       spin_lock(&pd->lock);
+       /* Prefer resuming at the last written position; else start from
+        * the lowest-sector queued bio. */
+       first_node = pkt_rbtree_find(pd, pd->current_sector);
+       if (!first_node) {
+               n = rb_first(&pd->bio_queue);
+               if (n)
+                       first_node = rb_entry(n, struct pkt_rb_node, rb_node);
+       }
+       node = first_node;
+       while (node) {
+               bio = node->bio;
+               zone = get_zone(bio->bi_iter.bi_sector, pd);
+               /* Skip zones that already have an active packet. */
+               list_for_each_entry(p, &pd->cdrw.pkt_active_list, list) {
+                       if (p->sector == zone) {
+                               bio = NULL;
+                               goto try_next_bio;
+                       }
+               }
+               break;
+try_next_bio:
+               /* Walk the tree, wrapping to the start once; stop after a
+                * full circle back to first_node. */
+               node = pkt_rbtree_next(node);
+               if (!node) {
+                       n = rb_first(&pd->bio_queue);
+                       if (n)
+                               node = rb_entry(n, struct pkt_rb_node, rb_node);
+               }
+               if (node == first_node)
+                       node = NULL;
+       }
+       spin_unlock(&pd->lock);
+       if (!bio) {
+               pkt_dbg(2, pd, "no bio\n");
+               return 0;
+       }
+
+       pkt = pkt_get_packet_data(pd, zone);
+
+       pd->current_sector = zone + pd->settings.size;
+       pkt->sector = zone;
+       BUG_ON(pkt->frames != pd->settings.size >> 2);
+       pkt->write_size = 0;
+
+       /*
+        * Scan work queue for bios in the same zone and link them
+        * to this packet.
+        */
+       spin_lock(&pd->lock);
+       pkt_dbg(2, pd, "looking for zone %llx\n", (unsigned long long)zone);
+       while ((node = pkt_rbtree_find(pd, zone)) != NULL) {
+               bio = node->bio;
+               pkt_dbg(2, pd, "found zone=%llx\n", (unsigned long long)
+                       get_zone(bio->bi_iter.bi_sector, pd));
+               if (get_zone(bio->bi_iter.bi_sector, pd) != zone)
+                       break;
+               pkt_rbtree_erase(pd, node);
+               spin_lock(&pkt->lock);
+               bio_list_add(&pkt->orig_bios, bio);
+               pkt->write_size += bio->bi_iter.bi_size / CD_FRAMESIZE;
+               spin_unlock(&pkt->lock);
+       }
+       /* check write congestion marks, and if bio_queue_size is
+        * below, wake up any waiters
+        */
+       if (pd->congested &&
+           pd->bio_queue_size <= pd->write_congestion_off) {
+               pd->congested = false;
+               wake_up_var(&pd->congested);
+       }
+       spin_unlock(&pd->lock);
+
+       /* Give the zone a grace period to collect more bios before the
+        * state machine starts gathering data. */
+       pkt->sleep_time = max(PACKET_WAIT_TIME, 1);
+       pkt_set_state(pkt, PACKET_WAITING_STATE);
+       atomic_set(&pkt->run_sm, 1);
+
+       spin_lock(&pd->cdrw.active_list_lock);
+       list_add(&pkt->list, &pd->cdrw.pkt_active_list);
+       spin_unlock(&pd->cdrw.active_list_lock);
+
+       return 1;
+}
+
+/**
+ * bio_list_copy_data - copy contents of data buffers from one chain of bios to
+ * another
+ * @dst: destination bio list
+ * @src: source bio list
+ *
+ * Stops when it reaches the end of either the @src list or @dst list - that is,
+ * copies min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of
+ * bios).
+ */
+static void bio_list_copy_data(struct bio *dst, struct bio *src)
+{
+       struct bvec_iter src_iter = src->bi_iter;
+       struct bvec_iter dst_iter = dst->bi_iter;
+
+       while (1) {
+               /* Source bio exhausted: advance to the next in the chain. */
+               if (!src_iter.bi_size) {
+                       src = src->bi_next;
+                       if (!src)
+                               break;
+
+                       src_iter = src->bi_iter;
+               }
+
+               /* Destination bio exhausted: advance likewise. */
+               if (!dst_iter.bi_size) {
+                       dst = dst->bi_next;
+                       if (!dst)
+                               break;
+
+                       dst_iter = dst->bi_iter;
+               }
+
+               /* Copies min(src, dst) remaining bytes and advances both
+                * iterators. */
+               bio_copy_data_iter(dst, &dst_iter, src, &src_iter);
+       }
+}
+
+/*
+ * Assemble a bio to write one packet and queue the bio for processing
+ * by the underlying block device.
+ */
+static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
+{
+       int f;
+
+       bio_init(pkt->w_bio, pd->bdev, pkt->w_bio->bi_inline_vecs, pkt->frames,
+                REQ_OP_WRITE);
+       pkt->w_bio->bi_iter.bi_sector = pkt->sector;
+       pkt->w_bio->bi_end_io = pkt_end_io_packet_write;
+       pkt->w_bio->bi_private = pkt;
+
+       /* XXX: locking? */
+       /* Attach every frame's backing page to the write bio. */
+       for (f = 0; f < pkt->frames; f++) {
+               struct page *page = pkt->pages[(f * CD_FRAMESIZE) / PAGE_SIZE];
+               unsigned offset = (f * CD_FRAMESIZE) % PAGE_SIZE;
+
+               if (!bio_add_page(pkt->w_bio, page, CD_FRAMESIZE, offset))
+                       BUG();
+       }
+       pkt_dbg(2, pd, "vcnt=%d\n", pkt->w_bio->bi_vcnt);
+
+       /*
+        * Fill-in bvec with data from orig_bios.
+        */
+       spin_lock(&pkt->lock);
+       bio_list_copy_data(pkt->w_bio, pkt->orig_bios.head);
+
+       pkt_set_state(pkt, PACKET_WRITE_WAIT_STATE);
+       spin_unlock(&pkt->lock);
+
+       pkt_dbg(2, pd, "Writing %d frames for zone %llx\n",
+               pkt->write_size, (unsigned long long)pkt->sector);
+
+       /* Keep the cache when merging was needed or the packet was only
+        * partially filled (the gathered frames may be reused). */
+       if (test_bit(PACKET_MERGE_SEGS, &pd->flags) || (pkt->write_size < pkt->frames))
+               pkt->cache_valid = 1;
+       else
+               pkt->cache_valid = 0;
+
+       /* Start the write request */
+       atomic_set(&pkt->io_wait, 1);
+       pkt_queue_bio(pd, pkt->w_bio);
+}
+
+/* Complete every original bio attached to @pkt with @status.  A failed
+ * packet also loses its cached data. */
+static void pkt_finish_packet(struct packet_data *pkt, blk_status_t status)
+{
+       struct bio *bio;
+
+       if (status)
+               pkt->cache_valid = 0;
+
+       /* Drain orig_bios, propagating the packet's final status. */
+       while ((bio = bio_list_pop(&pkt->orig_bios)) != NULL) {
+               bio->bi_status = status;
+               bio_endio(bio);
+       }
+}
+
+/* Drive @pkt through its life cycle:
+ * WAITING -> (gather) -> READ_WAIT -> (write) -> WRITE_WAIT -> FINISHED,
+ * with RECOVERY on I/O errors.  Returns when the packet is waiting on
+ * I/O or has finished; completions re-trigger it via run_sm. */
+static void pkt_run_state_machine(struct pktcdvd_device *pd, struct packet_data *pkt)
+{
+       pkt_dbg(2, pd, "pkt %d\n", pkt->id);
+
+       for (;;) {
+               switch (pkt->state) {
+               case PACKET_WAITING_STATE:
+                       /* Keep waiting for more bios until the packet is
+                        * full or its grace period expires. */
+                       if ((pkt->write_size < pkt->frames) && (pkt->sleep_time > 0))
+                               return;
+
+                       pkt->sleep_time = 0;
+                       pkt_gather_data(pd, pkt);
+                       pkt_set_state(pkt, PACKET_READ_WAIT_STATE);
+                       break;
+
+               case PACKET_READ_WAIT_STATE:
+                       if (atomic_read(&pkt->io_wait) > 0)
+                               return;
+
+                       if (atomic_read(&pkt->io_errors) > 0) {
+                               pkt_set_state(pkt, PACKET_RECOVERY_STATE);
+                       } else {
+                               pkt_start_write(pd, pkt);
+                       }
+                       break;
+
+               case PACKET_WRITE_WAIT_STATE:
+                       if (atomic_read(&pkt->io_wait) > 0)
+                               return;
+
+                       if (!pkt->w_bio->bi_status) {
+                               pkt_set_state(pkt, PACKET_FINISHED_STATE);
+                       } else {
+                               pkt_set_state(pkt, PACKET_RECOVERY_STATE);
+                       }
+                       break;
+
+               case PACKET_RECOVERY_STATE:
+                       pkt_dbg(2, pd, "No recovery possible\n");
+                       pkt_set_state(pkt, PACKET_FINISHED_STATE);
+                       break;
+
+               case PACKET_FINISHED_STATE:
+                       /*
+                        * NOTE(review): when RECOVERY was entered from read
+                        * errors, w_bio was never submitted, so bi_status
+                        * read here reflects the bio as last initialized --
+                        * confirm this is the intended completion status.
+                        */
+                       pkt_finish_packet(pkt, pkt->w_bio->bi_status);
+                       return;
+
+               default:
+                       BUG();
+                       break;
+               }
+       }
+}
+
+/* Run the state machine for every active packet that requested it, then
+ * recycle finished packets back onto the free list. */
+static void pkt_handle_packets(struct pktcdvd_device *pd)
+{
+       struct packet_data *pkt, *next;
+
+       /*
+        * Run state machine for active packets
+        */
+       list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
+               if (atomic_read(&pkt->run_sm) > 0) {
+                       atomic_set(&pkt->run_sm, 0);
+                       pkt_run_state_machine(pd, pkt);
+               }
+       }
+
+       /*
+        * Move no longer active packets to the free list
+        */
+       spin_lock(&pd->cdrw.active_list_lock);
+       list_for_each_entry_safe(pkt, next, &pd->cdrw.pkt_active_list, list) {
+               if (pkt->state == PACKET_FINISHED_STATE) {
+                       list_del(&pkt->list);
+                       pkt_put_packet_data(pd, pkt);
+                       pkt_set_state(pkt, PACKET_IDLE_STATE);
+                       /* Freed a packet slot: rescan the bio queue. */
+                       atomic_set(&pd->scan_queue, 1);
+               }
+       }
+       spin_unlock(&pd->cdrw.active_list_lock);
+}
+
+/* Tally how many active packets sit in each state.  @states must hold
+ * PACKET_NUM_STATES ints; it is zeroed first. */
+static void pkt_count_states(struct pktcdvd_device *pd, int *states)
+{
+       struct packet_data *pkt;
+
+       memset(states, 0, PACKET_NUM_STATES * sizeof(states[0]));
+
+       spin_lock(&pd->cdrw.active_list_lock);
+       list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list)
+               states[pkt->state]++;
+       spin_unlock(&pd->cdrw.active_list_lock);
+}
+
+/*
+ * kcdrwd is woken up when writes have been queued for one of our
+ * registered devices
+ */
+/* Per-device worker thread: sleeps until there is queue-scan, state-
+ * machine, or iosched work, then processes all three.  @foobar is the
+ * device's pktcdvd_device. */
+static int kcdrwd(void *foobar)
+{
+       struct pktcdvd_device *pd = foobar;
+       struct packet_data *pkt;
+       long min_sleep_time, residue;
+
+       set_user_nice(current, MIN_NICE);
+       set_freezable();
+
+       for (;;) {
+               DECLARE_WAITQUEUE(wait, current);
+
+               /*
+                * Wait until there is something to do
+                */
+               add_wait_queue(&pd->wqueue, &wait);
+               for (;;) {
+                       /* Set state before checking conditions so a wake-up
+                        * between check and schedule is not lost. */
+                       set_current_state(TASK_INTERRUPTIBLE);
+
+                       /* Check if we need to run pkt_handle_queue */
+                       if (atomic_read(&pd->scan_queue) > 0)
+                               goto work_to_do;
+
+                       /* Check if we need to run the state machine for some packet */
+                       list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
+                               if (atomic_read(&pkt->run_sm) > 0)
+                                       goto work_to_do;
+                       }
+
+                       /* Check if we need to process the iosched queues */
+                       if (atomic_read(&pd->iosched.attention) != 0)
+                               goto work_to_do;
+
+                       /* Otherwise, go to sleep */
+                       if (PACKET_DEBUG > 1) {
+                               int states[PACKET_NUM_STATES];
+                               pkt_count_states(pd, states);
+                               pkt_dbg(2, pd, "i:%d ow:%d rw:%d ww:%d rec:%d fin:%d\n",
+                                       states[0], states[1], states[2],
+                                       states[3], states[4], states[5]);
+                       }
+
+                       /* Sleep only until the earliest packet deadline. */
+                       min_sleep_time = MAX_SCHEDULE_TIMEOUT;
+                       list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
+                               if (pkt->sleep_time && pkt->sleep_time < min_sleep_time)
+                                       min_sleep_time = pkt->sleep_time;
+                       }
+
+                       pkt_dbg(2, pd, "sleeping\n");
+                       residue = schedule_timeout(min_sleep_time);
+                       pkt_dbg(2, pd, "wake up\n");
+
+                       /* make swsusp happy with our thread */
+                       try_to_freeze();
+
+                       /* Charge the elapsed time against each waiting
+                        * packet; expired ones get a state-machine run. */
+                       list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
+                               if (!pkt->sleep_time)
+                                       continue;
+                               pkt->sleep_time -= min_sleep_time - residue;
+                               if (pkt->sleep_time <= 0) {
+                                       pkt->sleep_time = 0;
+                                       atomic_inc(&pkt->run_sm);
+                               }
+                       }
+
+                       if (kthread_should_stop())
+                               break;
+               }
+work_to_do:
+               set_current_state(TASK_RUNNING);
+               remove_wait_queue(&pd->wqueue, &wait);
+
+               if (kthread_should_stop())
+                       break;
+
+               /*
+                * if pkt_handle_queue returns true, we can queue
+                * another request.
+                */
+               while (pkt_handle_queue(pd))
+                       ;
+
+               /*
+                * Handle packet state machine
+                */
+               pkt_handle_packets(pd);
+
+               /*
+                * Handle iosched queues
+                */
+               pkt_iosched_process_queue(pd);
+       }
+
+       return 0;
+}
+
+/* Log the current write settings (packet type, packet length in frames,
+ * and disc mode) at info level. */
+static void pkt_print_settings(struct pktcdvd_device *pd)
+{
+       const char *packet_type = pd->settings.fp ? "Fixed" : "Variable";
+       char disc_mode = (pd->settings.block_mode == 8) ? '1' : '2';
+
+       pkt_info(pd, "%s packets, %u blocks, Mode-%c disc\n",
+                packet_type, pd->settings.size >> 2, disc_mode);
+}
+
+/* Build and issue a MODE SENSE(10) for @page_code/@page_control; the
+ * response lands in the caller-provided cgc buffer. */
+static int pkt_mode_sense(struct pktcdvd_device *pd, struct packet_command *cgc, int page_code, int page_control)
+{
+       memset(cgc->cmd, 0, sizeof(cgc->cmd));
+
+       cgc->data_direction = CGC_DATA_READ;
+       cgc->cmd[0] = GPCMD_MODE_SENSE_10;
+       /* Byte 2 carries PC (bits 7-6) and the page code (bits 5-0). */
+       cgc->cmd[2] = page_code | (page_control << 6);
+       /* Allocation length, big-endian. */
+       cgc->cmd[7] = cgc->buflen >> 8;
+       cgc->cmd[8] = cgc->buflen & 0xff;
+
+       return pkt_generic_packet(pd, cgc);
+}
+
+/* Issue a MODE SELECT(10) sending the caller's cgc buffer to the drive;
+ * the mode-data-length header bytes are cleared first as required. */
+static int pkt_mode_select(struct pktcdvd_device *pd, struct packet_command *cgc)
+{
+       memset(cgc->cmd, 0, sizeof(cgc->cmd));
+       memset(cgc->buffer, 0, 2);
+
+       cgc->data_direction = CGC_DATA_WRITE;
+       cgc->cmd[0] = GPCMD_MODE_SELECT_10;
+       cgc->cmd[1] = 0x10;             /* PF */
+       /* Parameter list length, big-endian. */
+       cgc->cmd[7] = cgc->buflen >> 8;
+       cgc->cmd[8] = cgc->buflen & 0xff;
+
+       return pkt_generic_packet(pd, cgc);
+}
+
+/* Read the disc information block into @di.  Issues the command twice:
+ * first to learn the drive's actual data length, then to fetch it. */
+static int pkt_get_disc_info(struct pktcdvd_device *pd, disc_information *di)
+{
+       struct packet_command cgc;
+       int ret;
+
+       /* set up command and get the disc info */
+       init_cdrom_command(&cgc, di, sizeof(*di), CGC_DATA_READ);
+       cgc.cmd[0] = GPCMD_READ_DISC_INFO;
+       cgc.cmd[8] = cgc.buflen = 2;
+       cgc.quiet = 1;
+
+       ret = pkt_generic_packet(pd, &cgc);
+       if (ret)
+               return ret;
+
+       /* not all drives have the same disc_info length, so requeue
+        * packet with the length the drive tells us it can supply
+        */
+       cgc.buflen = be16_to_cpu(di->disc_information_length) +
+                    sizeof(di->disc_information_length);
+
+       /* Never read past our own structure. */
+       if (cgc.buflen > sizeof(disc_information))
+               cgc.buflen = sizeof(disc_information);
+
+       cgc.cmd[8] = cgc.buflen;
+       return pkt_generic_packet(pd, &cgc);
+}
+
+/* Read track/rzone information for @track into @ti.  Like
+ * pkt_get_disc_info(), probes with a short read first and then fetches
+ * the length the drive reports. */
+static int pkt_get_track_info(struct pktcdvd_device *pd, __u16 track, __u8 type, track_information *ti)
+{
+       struct packet_command cgc;
+       int ret;
+
+       init_cdrom_command(&cgc, ti, 8, CGC_DATA_READ);
+       cgc.cmd[0] = GPCMD_READ_TRACK_RZONE_INFO;
+       /* type selects the address/number field interpretation. */
+       cgc.cmd[1] = type & 3;
+       /* Track number, big-endian. */
+       cgc.cmd[4] = (track & 0xff00) >> 8;
+       cgc.cmd[5] = track & 0xff;
+       cgc.cmd[8] = 8;
+       cgc.quiet = 1;
+
+       ret = pkt_generic_packet(pd, &cgc);
+       if (ret)
+               return ret;
+
+       /* Requeue with the full length the drive can supply. */
+       cgc.buflen = be16_to_cpu(ti->track_information_length) +
+                    sizeof(ti->track_information_length);
+
+       if (cgc.buflen > sizeof(track_information))
+               cgc.buflen = sizeof(track_information);
+
+       cgc.cmd[8] = cgc.buflen;
+       return pkt_generic_packet(pd, &cgc);
+}
+
+/* Determine the last written address on the disc and store it in
+ * *@last_written.  Falls back to the previous track if the last one is
+ * blank, and estimates from track start/size when the drive does not
+ * report a valid last-recorded address.  Returns 0 or a negative errno. */
+static noinline_for_stack int pkt_get_last_written(struct pktcdvd_device *pd,
+                                               long *last_written)
+{
+       disc_information di;
+       track_information ti;
+       __u32 last_track;
+       int ret;
+
+       ret = pkt_get_disc_info(pd, &di);
+       if (ret)
+               return ret;
+
+       last_track = (di.last_track_msb << 8) | di.last_track_lsb;
+       ret = pkt_get_track_info(pd, last_track, 1, &ti);
+       if (ret)
+               return ret;
+
+       /* if this track is blank, try the previous. */
+       if (ti.blank) {
+               last_track--;
+               ret = pkt_get_track_info(pd, last_track, 1, &ti);
+               if (ret)
+                       return ret;
+       }
+
+       /* if last recorded field is valid, return it. */
+       if (ti.lra_v) {
+               *last_written = be32_to_cpu(ti.last_rec_address);
+       } else {
+               /* make it up instead */
+               *last_written = be32_to_cpu(ti.track_start) +
+                               be32_to_cpu(ti.track_size);
+               /* Back off past the free blocks plus a 7-block margin. */
+               if (ti.free_blocks)
+                       *last_written -= (be32_to_cpu(ti.free_blocks) + 7);
+       }
+       return 0;
+}
+
+/*
+ * write mode select package based on pd->settings
+ */
+/* Read the drive's write-parameters mode page, fill it in from
+ * pd->settings, and write it back via MODE SELECT.  Returns 0 on
+ * success, a negative errno on command failure, or 1 for an unknown
+ * block mode. */
+static noinline_for_stack int pkt_set_write_settings(struct pktcdvd_device *pd)
+{
+       struct packet_command cgc;
+       struct scsi_sense_hdr sshdr;
+       write_param_page *wp;
+       char buffer[128];
+       int ret, size;
+
+       /* doesn't apply to DVD+RW or DVD-RAM */
+       if ((pd->mmc3_profile == 0x1a) || (pd->mmc3_profile == 0x12))
+               return 0;
+
+       /* First sense: discover the mode data size and block descriptor
+        * offset. */
+       memset(buffer, 0, sizeof(buffer));
+       init_cdrom_command(&cgc, buffer, sizeof(*wp), CGC_DATA_READ);
+       cgc.sshdr = &sshdr;
+       ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0);
+       if (ret) {
+               pkt_dump_sense(pd, &cgc);
+               return ret;
+       }
+
+       /* Mode data length is a big-endian u16 at offset 0; block
+        * descriptor length at offset 6. */
+       size = 2 + ((buffer[0] << 8) | (buffer[1] & 0xff));
+       pd->mode_offset = (buffer[6] << 8) | (buffer[7] & 0xff);
+       if (size > sizeof(buffer))
+               size = sizeof(buffer);
+
+       /*
+        * now get it all
+        */
+       init_cdrom_command(&cgc, buffer, size, CGC_DATA_READ);
+       cgc.sshdr = &sshdr;
+       ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0);
+       if (ret) {
+               pkt_dump_sense(pd, &cgc);
+               return ret;
+       }
+
+       /*
+        * write page is offset header + block descriptor length
+        */
+       wp = (write_param_page *) &buffer[sizeof(struct mode_page_header) + pd->mode_offset];
+
+       wp->fp = pd->settings.fp;
+       wp->track_mode = pd->settings.track_mode;
+       wp->write_type = pd->settings.write_type;
+       wp->data_block_type = pd->settings.block_mode;
+
+       wp->multi_session = 0;
+
+#ifdef PACKET_USE_LS
+       wp->link_size = 7;
+       wp->ls_v = 1;
+#endif
+
+       if (wp->data_block_type == PACKET_BLOCK_MODE1) {
+               wp->session_format = 0;
+               wp->subhdr2 = 0x20;
+       } else if (wp->data_block_type == PACKET_BLOCK_MODE2) {
+               wp->session_format = 0x20;
+               wp->subhdr2 = 8;
+#if 0
+               wp->mcn[0] = 0x80;
+               memcpy(&wp->mcn[1], PACKET_MCN, sizeof(wp->mcn) - 1);
+#endif
+       } else {
+               /*
+                * paranoia
+                */
+               pkt_err(pd, "write mode wrong %d\n", wp->data_block_type);
+               return 1;
+       }
+       wp->packet_size = cpu_to_be32(pd->settings.size >> 2);
+
+       /* Write the updated page back to the drive. */
+       cgc.buflen = cgc.cmd[8] = size;
+       ret = pkt_mode_select(pd, &cgc);
+       if (ret) {
+               pkt_dump_sense(pd, &cgc);
+               return ret;
+       }
+
+       pkt_print_settings(pd);
+       return 0;
+}
+
+/*
+ * 1 -- we can write to this track, 0 -- we can't
+ */
+static int pkt_writable_track(struct pktcdvd_device *pd, track_information *ti)
+{
+       /* DVD+RW (0x1a) and DVD-RAM (0x12) tracks are always writable. */
+       if (pd->mmc3_profile == 0x1a || pd->mmc3_profile == 0x12)
+               return 1;
+
+       /* Only fixed-length packet tracks are usable. */
+       if (!ti->packet || !ti->fp)
+               return 0;
+
+       /*
+        * "good" settings as per Mt Fuji: every rt/blank combination is
+        * acceptable except rt == 1 && blank == 1.
+        */
+       if ((ti->rt == 0 && ti->blank == 0) ||
+           (ti->rt == 0 && ti->blank == 1) ||
+           (ti->rt == 1 && ti->blank == 0))
+               return 1;
+
+       pkt_err(pd, "bad state %d-%d-%d\n", ti->rt, ti->blank, ti->packet);
+       return 0;
+}
+
+/*
+ * 1 -- we can write to this disc, 0 -- we can't
+ */
+static int pkt_writable_disc(struct pktcdvd_device *pd, disc_information *di)
+{
+       switch (pd->mmc3_profile) {
+               case 0x0a: /* CD-RW */
+               case 0xffff: /* MMC3 not supported */
+                       /* CD-RW (or unknown-profile) media: run the detailed
+                        * disc-information checks below. */
+                       break;
+               case 0x1a: /* DVD+RW */
+               case 0x13: /* DVD-RW */
+               case 0x12: /* DVD-RAM */
+                       /* rewritable DVD media is always considered writable */
+                       return 1;
+               default:
+                       pkt_dbg(2, pd, "Wrong disc profile (%x)\n",
+                               pd->mmc3_profile);
+                       return 0;
+       }
+
+       /*
+        * for disc type 0xff we should probably reserve a new track.
+        * but i'm not sure, should we leave this to user apps? probably.
+        */
+       if (di->disc_type == 0xff) {
+               pkt_notice(pd, "unknown disc - no track?\n");
+               return 0;
+       }
+
+       /* accept only CD-ROM XA (0x20) or CD-DA/CD-ROM (0) disc types */
+       if (di->disc_type != 0x20 && di->disc_type != 0) {
+               pkt_err(pd, "wrong disc type (%x)\n", di->disc_type);
+               return 0;
+       }
+
+       if (di->erasable == 0) {
+               pkt_notice(pd, "disc not erasable\n");
+               return 0;
+       }
+
+       /* a reserved last session means we cannot append packets */
+       if (di->border_status == PACKET_SESSION_RESERVED) {
+               pkt_err(pd, "can't write to last track (reserved)\n");
+               return 0;
+       }
+
+       return 1;
+}
+
+/*
+ * Probe the inserted medium and fill in pd->settings (packet size, block
+ * mode, track mode, offsets) from the drive's disc and track information.
+ * Returns 0 on success, -EROFS when the medium is not packet-writable,
+ * or a negative error from the underlying commands.
+ */
+static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd)
+{
+       struct packet_command cgc;
+       unsigned char buf[12];
+       disc_information di;
+       track_information ti;
+       int ret, track;
+
+       /* GET CONFIGURATION: current profile lives in bytes 6-7 of the
+        * feature header; 0xffff is used as a "no MMC3" sentinel on failure. */
+       init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ);
+       cgc.cmd[0] = GPCMD_GET_CONFIGURATION;
+       cgc.cmd[8] = 8;
+       ret = pkt_generic_packet(pd, &cgc);
+       pd->mmc3_profile = ret ? 0xffff : buf[6] << 8 | buf[7];
+
+       memset(&di, 0, sizeof(disc_information));
+       memset(&ti, 0, sizeof(track_information));
+
+       ret = pkt_get_disc_info(pd, &di);
+       if (ret) {
+               pkt_err(pd, "failed get_disc\n");
+               return ret;
+       }
+
+       if (!pkt_writable_disc(pd, &di))
+               return -EROFS;
+
+       pd->type = di.erasable ? PACKET_CDRW : PACKET_CDR;
+
+       track = 1; /* (di.last_track_msb << 8) | di.last_track_lsb; */
+       ret = pkt_get_track_info(pd, track, 1, &ti);
+       if (ret) {
+               pkt_err(pd, "failed get_track\n");
+               return ret;
+       }
+
+       if (!pkt_writable_track(pd, &ti)) {
+               pkt_err(pd, "can't write to this track\n");
+               return -EROFS;
+       }
+
+       /*
+        * we keep packet size in 512 byte units, makes it easier to
+        * deal with request calculations.
+        */
+       pd->settings.size = be32_to_cpu(ti.fixed_packet_size) << 2;
+       if (pd->settings.size == 0) {
+               pkt_notice(pd, "detected zero packet size!\n");
+               return -ENXIO;
+       }
+       if (pd->settings.size > PACKET_MAX_SECTORS) {
+               pkt_err(pd, "packet size is too big\n");
+               return -EROFS;
+       }
+       pd->settings.fp = ti.fp;
+       /* NOTE(review): the mask assumes settings.size is a power of two;
+        * not validated here — confirm fixed packet sizes guarantee this. */
+       pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1);
+
+       if (ti.nwa_v) {
+               /* next writable address reported by the drive */
+               pd->nwa = be32_to_cpu(ti.next_writable);
+               set_bit(PACKET_NWA_VALID, &pd->flags);
+       }
+
+       /*
+        * in theory we could use lra on -RW media as well and just zero
+        * blocks that haven't been written yet, but in practice that
+        * is just a no-go. we'll use that for -R, naturally.
+        */
+       if (ti.lra_v) {
+               pd->lra = be32_to_cpu(ti.last_rec_address);
+               set_bit(PACKET_LRA_VALID, &pd->flags);
+       } else {
+               pd->lra = 0xffffffff;
+               set_bit(PACKET_LRA_VALID, &pd->flags);
+       }
+
+       /*
+        * fine for now
+        */
+       pd->settings.link_loss = 7;
+       pd->settings.write_type = 0;    /* packet */
+       pd->settings.track_mode = ti.track_mode;
+
+       /*
+        * mode1 or mode2 disc
+        */
+       switch (ti.data_mode) {
+               case PACKET_MODE1:
+                       pd->settings.block_mode = PACKET_BLOCK_MODE1;
+                       break;
+               case PACKET_MODE2:
+                       pd->settings.block_mode = PACKET_BLOCK_MODE2;
+                       break;
+               default:
+                       pkt_err(pd, "unknown data mode\n");
+                       return -EROFS;
+       }
+       return 0;
+}
+
+/*
+ * enable/disable write caching on drive
+ *
+ * Issues MODE SENSE for the caching mode page, flips the WCE bit, and
+ * writes the page back with MODE SELECT. Returns 0 on success or a
+ * negative error from the mode commands.
+ */
+static noinline_for_stack int pkt_write_caching(struct pktcdvd_device *pd,
+                                               int set)
+{
+       struct packet_command cgc;
+       struct scsi_sense_hdr sshdr;
+       unsigned char buf[64];
+       int ret;
+
+       init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ);
+       cgc.sshdr = &sshdr;
+       cgc.buflen = pd->mode_offset + 12;
+
+       /*
+        * caching mode page might not be there, so quiet this command
+        */
+       cgc.quiet = 1;
+
+       ret = pkt_mode_sense(pd, &cgc, GPMODE_WCACHING_PAGE, 0);
+       if (ret)
+               return ret;
+
+       /* set (or clear) the WCE bit in the returned caching page */
+       buf[pd->mode_offset + 10] |= (!!set << 2);
+
+       /* transfer length = mode data length field + 2 */
+       cgc.buflen = cgc.cmd[8] = 2 + ((buf[0] << 8) | (buf[1] & 0xff));
+       ret = pkt_mode_select(pd, &cgc);
+       if (ret) {
+               pkt_err(pd, "write caching control failed\n");
+               pkt_dump_sense(pd, &cgc);
+       } else if (set) {
+               /* ret is known zero here; the old "!ret &&" test was redundant */
+               pkt_notice(pd, "enabled write caching\n");
+       }
+       return ret;
+}
+
+/* Lock (lockflag != 0) or unlock the drive's medium-removal mechanism. */
+static int pkt_lock_door(struct pktcdvd_device *pd, int lockflag)
+{
+       struct packet_command cmd;
+
+       init_cdrom_command(&cmd, NULL, 0, CGC_DATA_NONE);
+       cmd.cmd[0] = GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL;
+       cmd.cmd[4] = lockflag ? 1 : 0;
+       return pkt_generic_packet(pd, &cmd);
+}
+
+/*
+ * Returns drive maximum write speed
+ */
+static noinline_for_stack int pkt_get_max_speed(struct pktcdvd_device *pd,
+                                               unsigned *write_speed)
+{
+       struct packet_command cgc;
+       struct scsi_sense_hdr sshdr;
+       unsigned char buf[256+18];
+       unsigned char *cap_buf;
+       int ret, offset;
+
+       /* cap_buf points at the capabilities mode page inside buf */
+       cap_buf = &buf[sizeof(struct mode_page_header) + pd->mode_offset];
+       init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_UNKNOWN);
+       cgc.sshdr = &sshdr;
+
+       ret = pkt_mode_sense(pd, &cgc, GPMODE_CAPABILITIES_PAGE, 0);
+       if (ret) {
+               /* retry with an explicit buffer length derived from the page
+                * length field.
+                * NOTE(review): cap_buf[1] is read here even though the first
+                * MODE SENSE failed and buf is never memset — confirm the
+                * drive partially filled the buffer in the failure cases this
+                * retry is meant to handle. */
+               cgc.buflen = pd->mode_offset + cap_buf[1] + 2 +
+                            sizeof(struct mode_page_header);
+               ret = pkt_mode_sense(pd, &cgc, GPMODE_CAPABILITIES_PAGE, 0);
+               if (ret) {
+                       pkt_dump_sense(pd, &cgc);
+                       return ret;
+               }
+       }
+
+       offset = 20;                        /* Obsoleted field, used by older drives */
+       if (cap_buf[1] >= 28)
+               offset = 28;                /* Current write speed selected */
+       if (cap_buf[1] >= 30) {
+               /* If the drive reports at least one "Logical Unit Write
+                * Speed Performance Descriptor Block", use the information
+                * in the first block. (contains the highest speed)
+                */
+               int num_spdb = (cap_buf[30] << 8) + cap_buf[31];
+               if (num_spdb > 0)
+                       offset = 34;
+       }
+
+       /* big-endian 16-bit speed value in kB/s */
+       *write_speed = (cap_buf[offset] << 8) | cap_buf[offset + 1];
+       return 0;
+}
+
+/* These tables from cdrecord - I don't have orange book */
+/* Each table maps the 4-bit ATIP A1 speed code to a CD speed multiplier
+ * (0 = unknown/invalid code); which table applies depends on the disc
+ * sub-type read in pkt_media_speed(). */
+/* standard speed CD-RW (1-4x) */
+static char clv_to_speed[16] = {
+       /* 0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 */
+          0, 2, 4, 6, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+/* high speed CD-RW (-10x) */
+static char hs_clv_to_speed[16] = {
+       /* 0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 */
+          0, 2, 4, 6, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+/* ultra high speed CD-RW */
+static char us_clv_to_speed[16] = {
+       /* 0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 */
+          0, 2, 4, 8, 0, 0,16, 0,24,32,40,48, 0, 0, 0, 0
+};
+
+/*
+ * reads the maximum media speed from ATIP
+ *
+ * Two-phase READ TOC/PMA/ATIP: first a 2-byte read to learn the response
+ * length, then a full read. Returns 0 with *speed set on success, 1 when
+ * the medium/data is unusable, or a negative command error.
+ */
+static noinline_for_stack int pkt_media_speed(struct pktcdvd_device *pd,
+                                               unsigned *speed)
+{
+       struct packet_command cgc;
+       struct scsi_sense_hdr sshdr;
+       unsigned char buf[64];
+       unsigned int size, st, sp;
+       int ret;
+
+       init_cdrom_command(&cgc, buf, 2, CGC_DATA_READ);
+       cgc.sshdr = &sshdr;
+       cgc.cmd[0] = GPCMD_READ_TOC_PMA_ATIP;
+       cgc.cmd[1] = 2;
+       cgc.cmd[2] = 4; /* READ ATIP */
+       cgc.cmd[8] = 2;
+       ret = pkt_generic_packet(pd, &cgc);
+       if (ret) {
+               pkt_dump_sense(pd, &cgc);
+               return ret;
+       }
+       /* total response length = 16-bit length field + 2 header bytes */
+       size = ((unsigned int) buf[0]<<8) + buf[1] + 2;
+       if (size > sizeof(buf))
+               size = sizeof(buf);
+
+       /* second pass: fetch the full ATIP response */
+       init_cdrom_command(&cgc, buf, size, CGC_DATA_READ);
+       cgc.sshdr = &sshdr;
+       cgc.cmd[0] = GPCMD_READ_TOC_PMA_ATIP;
+       cgc.cmd[1] = 2;
+       cgc.cmd[2] = 4;
+       cgc.cmd[8] = size;
+       ret = pkt_generic_packet(pd, &cgc);
+       if (ret) {
+               pkt_dump_sense(pd, &cgc);
+               return ret;
+       }
+
+       if (!(buf[6] & 0x40)) {
+               pkt_notice(pd, "disc type is not CD-RW\n");
+               return 1;
+       }
+       if (!(buf[6] & 0x4)) {
+               pkt_notice(pd, "A1 values on media are not valid, maybe not CDRW?\n");
+               return 1;
+       }
+
+       st = (buf[6] >> 3) & 0x7; /* disc sub-type */
+
+       sp = buf[16] & 0xf; /* max speed from ATIP A1 field */
+
+       /* Info from cdrecord */
+       switch (st) {
+               case 0: /* standard speed */
+                       *speed = clv_to_speed[sp];
+                       break;
+               case 1: /* high speed */
+                       *speed = hs_clv_to_speed[sp];
+                       break;
+               case 2: /* ultra high speed */
+                       *speed = us_clv_to_speed[sp];
+                       break;
+               default:
+                       pkt_notice(pd, "unknown disc sub-type %d\n", st);
+                       return 1;
+       }
+       if (*speed) {
+               pkt_info(pd, "maximum media speed: %d\n", *speed);
+               return 0;
+       } else {
+               /* table entry was 0: speed code not defined for this sub-type */
+               pkt_notice(pd, "unknown speed %d for sub-type %d\n", sp, st);
+               return 1;
+       }
+}
+
+/*
+ * Ask the drive to run Optimum Power Calibration (SEND OPC INFORMATION
+ * with DoOpc set). Failures are reported via sense dump and returned.
+ */
+static noinline_for_stack int pkt_perform_opc(struct pktcdvd_device *pd)
+{
+       struct scsi_sense_hdr sense;
+       struct packet_command cmd;
+       int err;
+
+       pkt_dbg(2, pd, "Performing OPC\n");
+
+       init_cdrom_command(&cmd, NULL, 0, CGC_DATA_NONE);
+       cmd.sshdr = &sense;
+       cmd.timeout = 60*HZ;
+       cmd.cmd[0] = GPCMD_SEND_OPC;
+       cmd.cmd[1] = 1;
+       err = pkt_generic_packet(pd, &cmd);
+       if (err)
+               pkt_dump_sense(pd, &cmd);
+       return err;
+}
+
+/*
+ * Prepare the drive for packet writing: probe the medium, push the write
+ * parameters page, enable write caching, select read/write speeds and run
+ * OPC. Returns 0 on success or a negative error.
+ */
+static int pkt_open_write(struct pktcdvd_device *pd)
+{
+       int ret;
+       unsigned int write_speed, media_write_speed, read_speed;
+
+       ret = pkt_probe_settings(pd);
+       if (ret) {
+               pkt_dbg(2, pd, "failed probe\n");
+               return ret;
+       }
+
+       ret = pkt_set_write_settings(pd);
+       if (ret) {
+               pkt_dbg(1, pd, "failed saving write settings\n");
+               return -EIO;
+       }
+
+       /* best-effort: failure to toggle caching is not fatal */
+       pkt_write_caching(pd, USE_WCACHING);
+
+       ret = pkt_get_max_speed(pd, &write_speed);
+       if (ret)
+               write_speed = 16 * 177; /* fall back to 16x (1x CD = 177 kB/s) */
+       switch (pd->mmc3_profile) {
+               case 0x13: /* DVD-RW */
+               case 0x1a: /* DVD+RW */
+               case 0x12: /* DVD-RAM */
+                       /* DVD media: use the drive's reported speed as-is */
+                       pkt_dbg(1, pd, "write speed %ukB/s\n", write_speed);
+                       break;
+               default:
+                       /* CD media: clamp drive speed to what the disc allows */
+                       ret = pkt_media_speed(pd, &media_write_speed);
+                       if (ret)
+                               media_write_speed = 16;
+                       write_speed = min(write_speed, media_write_speed * 177);
+                       pkt_dbg(1, pd, "write speed %ux\n", write_speed / 176);
+                       break;
+       }
+       read_speed = write_speed;
+
+       ret = pkt_set_speed(pd, write_speed, read_speed);
+       if (ret) {
+               pkt_dbg(1, pd, "couldn't set write speed\n");
+               return -EIO;
+       }
+       pd->write_speed = write_speed;
+       pd->read_speed = read_speed;
+
+       /* OPC failure is logged but deliberately not treated as fatal */
+       ret = pkt_perform_opc(pd);
+       if (ret) {
+               pkt_dbg(1, pd, "Optimum Power Calibration failed\n");
+       }
+
+       return 0;
+}
+
+/*
+ * called at open time.
+ *
+ * Takes an extra blocking reference on the underlying CD-ROM device,
+ * sizes the pktcdvd disk from the last written LBA, and — for writes —
+ * runs the full write setup and allocates packet buffers. On any failure
+ * the extra blkdev reference is dropped via the goto cleanup path.
+ */
+static int pkt_open_dev(struct pktcdvd_device *pd, fmode_t write)
+{
+       int ret;
+       long lba;
+       struct request_queue *q;
+       struct block_device *bdev;
+
+       /*
+        * We need to re-open the cdrom device without O_NONBLOCK to be able
+        * to read/write from/to it. It is already opened in O_NONBLOCK mode
+        * so open should not fail.
+        */
+       bdev = blkdev_get_by_dev(pd->bdev->bd_dev, FMODE_READ | FMODE_EXCL, pd);
+       if (IS_ERR(bdev)) {
+               ret = PTR_ERR(bdev);
+               goto out;
+       }
+
+       ret = pkt_get_last_written(pd, &lba);
+       if (ret) {
+               pkt_err(pd, "pkt_get_last_written failed\n");
+               goto out_putdev;
+       }
+
+       /* lba is in CD frames; <<2 converts to 512-byte sectors */
+       set_capacity(pd->disk, lba << 2);
+       set_capacity_and_notify(pd->bdev->bd_disk, lba << 2);
+
+       q = bdev_get_queue(pd->bdev);
+       if (write) {
+               ret = pkt_open_write(pd);
+               if (ret)
+                       goto out_putdev;
+               /*
+                * Some CDRW drives can not handle writes larger than one packet,
+                * even if the size is a multiple of the packet size.
+                */
+               blk_queue_max_hw_sectors(q, pd->settings.size);
+               set_bit(PACKET_WRITABLE, &pd->flags);
+       } else {
+               pkt_set_speed(pd, MAX_SPEED, MAX_SPEED);
+               clear_bit(PACKET_WRITABLE, &pd->flags);
+       }
+
+       ret = pkt_set_segment_merging(pd, q);
+       if (ret)
+               goto out_putdev;
+
+       if (write) {
+               if (!pkt_grow_pktlist(pd, CONFIG_CDROM_PKTCDVD_BUFFERS)) {
+                       pkt_err(pd, "not enough memory for buffers\n");
+                       ret = -ENOMEM;
+                       goto out_putdev;
+               }
+               /* lba frames * 2 = size in kB */
+               pkt_info(pd, "%lukB available on disc\n", lba << 1);
+       }
+
+       return 0;
+
+out_putdev:
+       blkdev_put(bdev, FMODE_READ | FMODE_EXCL);
+out:
+       return ret;
+}
+
+/*
+ * called when the device is closed. makes sure that the device flushes
+ * the internal cache before we close.
+ *
+ * Also unlocks the door, restores maximum speeds, drops the blocking
+ * reference taken in pkt_open_dev() and frees the packet buffers.
+ */
+static void pkt_release_dev(struct pktcdvd_device *pd, int flush)
+{
+       if (flush && pkt_flush_cache(pd))
+               pkt_dbg(1, pd, "not flushing cache\n");
+
+       pkt_lock_door(pd, 0);
+
+       pkt_set_speed(pd, MAX_SPEED, MAX_SPEED);
+       blkdev_put(pd->bdev, FMODE_READ | FMODE_EXCL);
+
+       pkt_shrink_pktlist(pd);
+}
+
+/*
+ * Map a device minor number to its pktcdvd_device, or NULL if out of
+ * range / not set up. array_index_nospec() clamps the index to prevent
+ * speculative out-of-bounds reads (Spectre v1).
+ */
+static struct pktcdvd_device *pkt_find_dev_from_minor(unsigned int dev_minor)
+{
+       if (dev_minor >= MAX_WRITERS)
+               return NULL;
+
+       dev_minor = array_index_nospec(dev_minor, MAX_WRITERS);
+       return pkt_devs[dev_minor];
+}
+
+/*
+ * block_device_operations open: first opener performs the full device
+ * setup via pkt_open_dev(); subsequent openers just bump the refcount.
+ * A write open on a device not set up writable is refused with -EBUSY.
+ * Lock order: pktcdvd_mutex, then ctl_mutex.
+ */
+static int pkt_open(struct block_device *bdev, fmode_t mode)
+{
+       struct pktcdvd_device *pd = NULL;
+       int ret;
+
+       mutex_lock(&pktcdvd_mutex);
+       mutex_lock(&ctl_mutex);
+       pd = pkt_find_dev_from_minor(MINOR(bdev->bd_dev));
+       if (!pd) {
+               ret = -ENODEV;
+               goto out;
+       }
+       BUG_ON(pd->refcnt < 0);
+
+       pd->refcnt++;
+       if (pd->refcnt > 1) {
+               /* already open: a writer may not join a read-only setup */
+               if ((mode & FMODE_WRITE) &&
+                   !test_bit(PACKET_WRITABLE, &pd->flags)) {
+                       ret = -EBUSY;
+                       goto out_dec;
+               }
+       } else {
+               ret = pkt_open_dev(pd, mode & FMODE_WRITE);
+               if (ret)
+                       goto out_dec;
+               /*
+                * needed here as well, since ext2 (among others) may change
+                * the blocksize at mount time
+                */
+               set_blocksize(bdev, CD_FRAMESIZE);
+       }
+
+       mutex_unlock(&ctl_mutex);
+       mutex_unlock(&pktcdvd_mutex);
+       return 0;
+
+out_dec:
+       pd->refcnt--;
+out:
+       mutex_unlock(&ctl_mutex);
+       mutex_unlock(&pktcdvd_mutex);
+       return ret;
+}
+
+/*
+ * block_device_operations release: drop a reference; the last closer
+ * tears the device down, flushing the drive cache first if it was
+ * opened writable.
+ */
+static void pkt_close(struct gendisk *disk, fmode_t mode)
+{
+       struct pktcdvd_device *pd = disk->private_data;
+
+       mutex_lock(&pktcdvd_mutex);
+       mutex_lock(&ctl_mutex);
+       pd->refcnt--;
+       BUG_ON(pd->refcnt < 0);
+       if (pd->refcnt == 0) {
+               int flush = test_bit(PACKET_WRITABLE, &pd->flags);
+               pkt_release_dev(pd, flush);
+       }
+       mutex_unlock(&ctl_mutex);
+       mutex_unlock(&pktcdvd_mutex);
+}
+
+
+/*
+ * Completion handler for the cloned READ bio: propagate its status to
+ * the original bio, complete the original, and release the clone and
+ * the per-request bookkeeping.
+ */
+static void pkt_end_io_read_cloned(struct bio *bio)
+{
+       struct packet_stacked_data *psd = bio->bi_private;
+       struct pktcdvd_device *pd = psd->pd;
+
+       psd->bio->bi_status = bio->bi_status;
+       bio_put(bio);
+       bio_endio(psd->bio);
+       mempool_free(psd, &psd_pool);
+       pkt_bio_finished(pd);
+}
+
+/*
+ * Handle a READ bio by cloning it onto the underlying CD-ROM device so
+ * we get our own completion callback (pkt_end_io_read_cloned), then
+ * queue the clone through the driver's I/O scheduler.
+ */
+static void pkt_make_request_read(struct pktcdvd_device *pd, struct bio *bio)
+{
+       struct bio *cloned_bio =
+               bio_alloc_clone(pd->bdev, bio, GFP_NOIO, &pkt_bio_set);
+       struct packet_stacked_data *psd = mempool_alloc(&psd_pool, GFP_NOIO);
+
+       psd->pd = pd;
+       psd->bio = bio;
+       cloned_bio->bi_private = psd;
+       cloned_bio->bi_end_io = pkt_end_io_read_cloned;
+       pd->stats.secs_r += bio_sectors(bio);
+       pkt_queue_bio(pd, cloned_bio);
+}
+
+/*
+ * Handle a WRITE bio (already split so it fits one zone): either append
+ * it to an in-flight packet for the same zone, or — after throttling on
+ * the write-congestion marks — store it in the rb-tree work queue and
+ * wake the writer thread.
+ */
+static void pkt_make_request_write(struct request_queue *q, struct bio *bio)
+{
+       struct pktcdvd_device *pd = q->queuedata;
+       sector_t zone;
+       struct packet_data *pkt;
+       int was_empty, blocked_bio;
+       struct pkt_rb_node *node;
+
+       zone = get_zone(bio->bi_iter.bi_sector, pd);
+
+       /*
+        * If we find a matching packet in state WAITING or READ_WAIT, we can
+        * just append this bio to that packet.
+        */
+       spin_lock(&pd->cdrw.active_list_lock);
+       blocked_bio = 0;
+       list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
+               if (pkt->sector == zone) {
+                       spin_lock(&pkt->lock);
+                       if ((pkt->state == PACKET_WAITING_STATE) ||
+                           (pkt->state == PACKET_READ_WAIT_STATE)) {
+                               bio_list_add(&pkt->orig_bios, bio);
+                               pkt->write_size +=
+                                       bio->bi_iter.bi_size / CD_FRAMESIZE;
+                               /* packet now full: kick its state machine */
+                               if ((pkt->write_size >= pkt->frames) &&
+                                   (pkt->state == PACKET_WAITING_STATE)) {
+                                       atomic_inc(&pkt->run_sm);
+                                       wake_up(&pd->wqueue);
+                               }
+                               spin_unlock(&pkt->lock);
+                               spin_unlock(&pd->cdrw.active_list_lock);
+                               return;
+                       } else {
+                               /* zone busy in a later state; remember it so we
+                                * don't needlessly wake the thread below */
+                               blocked_bio = 1;
+                       }
+                       spin_unlock(&pkt->lock);
+               }
+       }
+       spin_unlock(&pd->cdrw.active_list_lock);
+
+       /*
+        * Test if there is enough room left in the bio work queue
+        * (queue size >= congestion on mark).
+        * If not, wait till the work queue size is below the congestion off mark.
+        */
+       spin_lock(&pd->lock);
+       if (pd->write_congestion_on > 0
+           && pd->bio_queue_size >= pd->write_congestion_on) {
+               struct wait_bit_queue_entry wqe;
+
+               init_wait_var_entry(&wqe, &pd->congested, 0);
+               for (;;) {
+                       prepare_to_wait_event(__var_waitqueue(&pd->congested),
+                                             &wqe.wq_entry,
+                                             TASK_UNINTERRUPTIBLE);
+                       if (pd->bio_queue_size <= pd->write_congestion_off)
+                               break;
+                       pd->congested = true;
+                       /* drop pd->lock while sleeping; retake to recheck */
+                       spin_unlock(&pd->lock);
+                       schedule();
+                       spin_lock(&pd->lock);
+               }
+       }
+       spin_unlock(&pd->lock);
+
+       /*
+        * No matching packet found. Store the bio in the work queue.
+        */
+       node = mempool_alloc(&pd->rb_pool, GFP_NOIO);
+       node->bio = bio;
+       spin_lock(&pd->lock);
+       BUG_ON(pd->bio_queue_size < 0);
+       was_empty = (pd->bio_queue_size == 0);
+       pkt_rbtree_insert(pd, node);
+       spin_unlock(&pd->lock);
+
+       /*
+        * Wake up the worker thread.
+        */
+       atomic_set(&pd->scan_queue, 1);
+       if (was_empty) {
+               /* This wake_up is required for correct operation */
+               wake_up(&pd->wqueue);
+       } else if (!list_empty(&pd->cdrw.pkt_free_list) && !blocked_bio) {
+               /*
+                * This wake up is not required for correct operation,
+                * but improves performance in some cases.
+                */
+               wake_up(&pd->wqueue);
+       }
+}
+
+/*
+ * Top-level bio entry point (block_device_operations.submit_bio).
+ * READs are cloned onto the underlying device; WRITEs are validated
+ * (device writable, size a multiple of CD_FRAMESIZE), split at zone
+ * boundaries, and handed to pkt_make_request_write().
+ */
+static void pkt_submit_bio(struct bio *bio)
+{
+       struct pktcdvd_device *pd = bio->bi_bdev->bd_disk->queue->queuedata;
+       struct bio *split;
+
+       bio = bio_split_to_limits(bio);
+       if (!bio)
+               return;
+
+       pkt_dbg(2, pd, "start = %6llx stop = %6llx\n",
+               (unsigned long long)bio->bi_iter.bi_sector,
+               (unsigned long long)bio_end_sector(bio));
+
+       /*
+        * Clone READ bios so we can have our own bi_end_io callback.
+        */
+       if (bio_data_dir(bio) == READ) {
+               pkt_make_request_read(pd, bio);
+               return;
+       }
+
+       if (!test_bit(PACKET_WRITABLE, &pd->flags)) {
+               pkt_notice(pd, "WRITE for ro device (%llu)\n",
+                          (unsigned long long)bio->bi_iter.bi_sector);
+               goto end_io;
+       }
+
+       /* writes must be whole CD frames */
+       if (!bio->bi_iter.bi_size || (bio->bi_iter.bi_size % CD_FRAMESIZE)) {
+               pkt_err(pd, "wrong bio size\n");
+               goto end_io;
+       }
+
+       /* split any bio that straddles a zone boundary; after the earlier
+        * bio_split_to_limits() a bio can span at most two zones */
+       do {
+               sector_t zone = get_zone(bio->bi_iter.bi_sector, pd);
+               sector_t last_zone = get_zone(bio_end_sector(bio) - 1, pd);
+
+               if (last_zone != zone) {
+                       BUG_ON(last_zone != zone + pd->settings.size);
+
+                       split = bio_split(bio, last_zone -
+                                         bio->bi_iter.bi_sector,
+                                         GFP_NOIO, &pkt_bio_set);
+                       bio_chain(split, bio);
+               } else {
+                       split = bio;
+               }
+
+               pkt_make_request_write(bio->bi_bdev->bd_disk->queue, split);
+       } while (split != bio);
+
+       return;
+end_io:
+       bio_io_error(bio);
+}
+
+/*
+ * Configure the pktcdvd disk's request queue: CD-frame logical blocks,
+ * default max transfer size, and a back-pointer used by submit_bio.
+ */
+static void pkt_init_queue(struct pktcdvd_device *pd)
+{
+       struct request_queue *q = pd->disk->queue;
+
+       blk_queue_logical_block_size(q, CD_FRAMESIZE);
+       blk_queue_max_hw_sectors(q, PACKET_MAX_SECTORS);
+       q->queuedata = pd;
+}
+
+/*
+ * /proc seq_file show routine: dump the writer's settings, statistics,
+ * misc state and queue state in human-readable form.
+ */
+static int pkt_seq_show(struct seq_file *m, void *p)
+{
+       struct pktcdvd_device *pd = m->private;
+       char *msg;
+       int states[PACKET_NUM_STATES];
+
+       seq_printf(m, "Writer %s mapped to %pg:\n", pd->name, pd->bdev);
+
+       seq_printf(m, "\nSettings:\n");
+       seq_printf(m, "\tpacket size:\t\t%dkB\n", pd->settings.size / 2);
+
+       if (pd->settings.write_type == 0)
+               msg = "Packet";
+       else
+               msg = "Unknown";
+       seq_printf(m, "\twrite type:\t\t%s\n", msg);
+
+       seq_printf(m, "\tpacket type:\t\t%s\n", pd->settings.fp ? "Fixed" : "Variable");
+       seq_printf(m, "\tlink loss:\t\t%d\n", pd->settings.link_loss);
+
+       seq_printf(m, "\ttrack mode:\t\t%d\n", pd->settings.track_mode);
+
+       if (pd->settings.block_mode == PACKET_BLOCK_MODE1)
+               msg = "Mode 1";
+       else if (pd->settings.block_mode == PACKET_BLOCK_MODE2)
+               msg = "Mode 2";
+       else
+               msg = "Unknown";
+       seq_printf(m, "\tblock mode:\t\t%s\n", msg);
+
+       seq_printf(m, "\nStatistics:\n");
+       seq_printf(m, "\tpackets started:\t%lu\n", pd->stats.pkt_started);
+       seq_printf(m, "\tpackets ended:\t\t%lu\n", pd->stats.pkt_ended);
+       /* sector counters are 512-byte units; >>1 converts to kB */
+       seq_printf(m, "\twritten:\t\t%lukB\n", pd->stats.secs_w >> 1);
+       seq_printf(m, "\tread gather:\t\t%lukB\n", pd->stats.secs_rg >> 1);
+       seq_printf(m, "\tread:\t\t\t%lukB\n", pd->stats.secs_r >> 1);
+
+       seq_printf(m, "\nMisc:\n");
+       seq_printf(m, "\treference count:\t%d\n", pd->refcnt);
+       seq_printf(m, "\tflags:\t\t\t0x%lx\n", pd->flags);
+       seq_printf(m, "\tread speed:\t\t%ukB/s\n", pd->read_speed);
+       seq_printf(m, "\twrite speed:\t\t%ukB/s\n", pd->write_speed);
+       seq_printf(m, "\tstart offset:\t\t%lu\n", pd->offset);
+       seq_printf(m, "\tmode page offset:\t%u\n", pd->mode_offset);
+
+       seq_printf(m, "\nQueue state:\n");
+       seq_printf(m, "\tbios queued:\t\t%d\n", pd->bio_queue_size);
+       seq_printf(m, "\tbios pending:\t\t%d\n", atomic_read(&pd->cdrw.pending_bios));
+       seq_printf(m, "\tcurrent sector:\t\t0x%llx\n", (unsigned long long)pd->current_sector);
+
+       pkt_count_states(pd, states);
+       seq_printf(m, "\tstate:\t\t\ti:%d ow:%d rw:%d ww:%d rec:%d fin:%d\n",
+                  states[0], states[1], states[2], states[3], states[4], states[5]);
+
+       seq_printf(m, "\twrite congestion marks:\toff=%d on=%d\n",
+                       pd->write_congestion_off,
+                       pd->write_congestion_on);
+       return 0;
+}
+
+/*
+ * Attach a pktcdvd device to the CD-ROM device identified by 'dev':
+ * reject recursive/duplicate/chained setups, take a reference on the
+ * block device (must be SCSI-backed), start the writer kthread and
+ * create the /proc entry. Returns 0 or a negative error.
+ */
+static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
+{
+       int i;
+       struct block_device *bdev;
+       struct scsi_device *sdev;
+
+       if (pd->pkt_dev == dev) {
+               pkt_err(pd, "recursive setup not allowed\n");
+               return -EBUSY;
+       }
+       /* refuse if another writer already uses this device, or if 'dev'
+        * is itself a pktcdvd device (no chaining) */
+       for (i = 0; i < MAX_WRITERS; i++) {
+               struct pktcdvd_device *pd2 = pkt_devs[i];
+               if (!pd2)
+                       continue;
+               if (pd2->bdev->bd_dev == dev) {
+                       pkt_err(pd, "%pg already setup\n", pd2->bdev);
+                       return -EBUSY;
+               }
+               if (pd2->pkt_dev == dev) {
+                       pkt_err(pd, "can't chain pktcdvd devices\n");
+                       return -EBUSY;
+               }
+       }
+
+       bdev = blkdev_get_by_dev(dev, FMODE_READ | FMODE_NDELAY, NULL);
+       if (IS_ERR(bdev))
+               return PTR_ERR(bdev);
+       /* only SCSI(-emulating) devices accept the packet commands we send */
+       sdev = scsi_device_from_queue(bdev->bd_disk->queue);
+       if (!sdev) {
+               blkdev_put(bdev, FMODE_READ | FMODE_NDELAY);
+               return -EINVAL;
+       }
+       put_device(&sdev->sdev_gendev);
+
+       /* This is safe, since we have a reference from open(). */
+       __module_get(THIS_MODULE);
+
+       pd->bdev = bdev;
+       set_blocksize(bdev, CD_FRAMESIZE);
+
+       pkt_init_queue(pd);
+
+       atomic_set(&pd->cdrw.pending_bios, 0);
+       pd->cdrw.thread = kthread_run(kcdrwd, pd, "%s", pd->name);
+       if (IS_ERR(pd->cdrw.thread)) {
+               pkt_err(pd, "can't start kernel thread\n");
+               goto out_mem;
+       }
+
+       proc_create_single_data(pd->name, 0, pkt_proc, pkt_seq_show, pd);
+       pkt_dbg(1, pd, "writer mapped to %pg\n", bdev);
+       return 0;
+
+out_mem:
+       blkdev_put(bdev, FMODE_READ | FMODE_NDELAY);
+       /* This is safe: open() is still holding a reference. */
+       module_put(THIS_MODULE);
+       return -ENOMEM;
+}
+
+/*
+ * ioctl handler: a small whitelist of CDROM/SCSI ioctls is forwarded to
+ * the underlying CD-ROM driver (needed by UDF tools); everything else is
+ * rejected with -ENOTTY.
+ */
+static int pkt_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg)
+{
+       struct pktcdvd_device *pd = bdev->bd_disk->private_data;
+       int ret;
+
+       pkt_dbg(2, pd, "cmd %x, dev %d:%d\n",
+               cmd, MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev));
+
+       mutex_lock(&pktcdvd_mutex);
+       switch (cmd) {
+       case CDROMEJECT:
+               /*
+                * The door gets locked when the device is opened, so we
+                * have to unlock it or else the eject command fails.
+                */
+               if (pd->refcnt == 1)
+                       pkt_lock_door(pd, 0);
+               fallthrough;
+       /*
+        * forward selected CDROM ioctls to CD-ROM, for UDF
+        */
+       case CDROMMULTISESSION:
+       case CDROMREADTOCENTRY:
+       case CDROM_LAST_WRITTEN:
+       case CDROM_SEND_PACKET:
+       case SCSI_IOCTL_SEND_COMMAND:
+               if (!bdev->bd_disk->fops->ioctl)
+                       ret = -ENOTTY;
+               else
+                       ret = bdev->bd_disk->fops->ioctl(bdev, mode, cmd, arg);
+               break;
+       default:
+               pkt_dbg(2, pd, "Unknown ioctl (%x)\n", cmd);
+               ret = -ENOTTY;
+       }
+       mutex_unlock(&pktcdvd_mutex);
+
+       return ret;
+}
+
+/*
+ * Forward media-change/eject event polling to the underlying CD-ROM
+ * disk's check_events; report no events if the mapping is incomplete
+ * or the backing driver has no handler.
+ */
+static unsigned int pkt_check_events(struct gendisk *disk,
+                                    unsigned int clearing)
+{
+       struct pktcdvd_device *pd = disk->private_data;
+       struct gendisk *attached_disk;
+
+       if (!pd)
+               return 0;
+       if (!pd->bdev)
+               return 0;
+       attached_disk = pd->bdev->bd_disk;
+       if (!attached_disk || !attached_disk->fops->check_events)
+               return 0;
+       return attached_disk->fops->check_events(attached_disk, clearing);
+}
+
+/* Place device nodes under /dev/pktcdvd/<name> (caller frees the string). */
+static char *pkt_devnode(struct gendisk *disk, umode_t *mode)
+{
+       return kasprintf(GFP_KERNEL, "pktcdvd/%s", disk->disk_name);
+}
+
+/* block_device_operations for the pktcdvd gendisk */
+static const struct block_device_operations pktcdvd_ops = {
+       .owner =                THIS_MODULE,
+       .submit_bio =           pkt_submit_bio,
+       .open =                 pkt_open,
+       .release =              pkt_close,
+       .ioctl =                pkt_ioctl,
+       .compat_ioctl =         blkdev_compat_ptr_ioctl,
+       .check_events =         pkt_check_events,
+       .devnode =              pkt_devnode,
+};
+
+/*
+ * Set up mapping from pktcdvd device to CD-ROM device.
+ */
+static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev)
+{
+       int idx;
+       int ret = -ENOMEM;
+       struct pktcdvd_device *pd;
+       struct gendisk *disk;
+
+       mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
+
+       for (idx = 0; idx < MAX_WRITERS; idx++)
+               if (!pkt_devs[idx])
+                       break;
+       if (idx == MAX_WRITERS) {
+               pr_err("max %d writers supported\n", MAX_WRITERS);
+               ret = -EBUSY;
+               goto out_mutex;
+       }
+
+       pd = kzalloc(sizeof(struct pktcdvd_device), GFP_KERNEL);
+       if (!pd)
+               goto out_mutex;
+
+       ret = mempool_init_kmalloc_pool(&pd->rb_pool, PKT_RB_POOL_SIZE,
+                                       sizeof(struct pkt_rb_node));
+       if (ret)
+               goto out_mem;
+
+       INIT_LIST_HEAD(&pd->cdrw.pkt_free_list);
+       INIT_LIST_HEAD(&pd->cdrw.pkt_active_list);
+       spin_lock_init(&pd->cdrw.active_list_lock);
+
+       spin_lock_init(&pd->lock);
+       spin_lock_init(&pd->iosched.lock);
+       bio_list_init(&pd->iosched.read_queue);
+       bio_list_init(&pd->iosched.write_queue);
+       sprintf(pd->name, DRIVER_NAME"%d", idx);
+       init_waitqueue_head(&pd->wqueue);
+       pd->bio_queue = RB_ROOT;
+
+       pd->write_congestion_on  = write_congestion_on;
+       pd->write_congestion_off = write_congestion_off;
+
+       ret = -ENOMEM;
+       disk = blk_alloc_disk(NUMA_NO_NODE);
+       if (!disk)
+               goto out_mem;
+       pd->disk = disk;
+       disk->major = pktdev_major;
+       disk->first_minor = idx;
+       disk->minors = 1;
+       disk->fops = &pktcdvd_ops;
+       disk->flags = GENHD_FL_REMOVABLE | GENHD_FL_NO_PART;
+       strcpy(disk->disk_name, pd->name);
+       disk->private_data = pd;
+
+       pd->pkt_dev = MKDEV(pktdev_major, idx);
+       ret = pkt_new_dev(pd, dev);
+       if (ret)
+               goto out_mem2;
+
+       /* inherit events of the host device */
+       disk->events = pd->bdev->bd_disk->events;
+
+       ret = add_disk(disk);
+       if (ret)
+               goto out_mem2;
+
+       pkt_sysfs_dev_new(pd);
+       pkt_debugfs_dev_new(pd);
+
+       pkt_devs[idx] = pd;
+       if (pkt_dev)
+               *pkt_dev = pd->pkt_dev;
+
+       mutex_unlock(&ctl_mutex);
+       return 0;
+
+out_mem2:
+       put_disk(disk);
+out_mem:
+       mempool_exit(&pd->rb_pool);
+       kfree(pd);
+out_mutex:
+       mutex_unlock(&ctl_mutex);
+       pr_err("setup of pktcdvd device failed\n");
+       return ret;
+}
+
+/*
+ * Tear down mapping from pktcdvd device to CD-ROM device.
+ */
+static int pkt_remove_dev(dev_t pkt_dev)
+{
+       struct pktcdvd_device *pd;
+       int idx;
+       int ret = 0;
+
+       mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
+
+       for (idx = 0; idx < MAX_WRITERS; idx++) {
+               pd = pkt_devs[idx];
+               if (pd && (pd->pkt_dev == pkt_dev))
+                       break;
+       }
+       if (idx == MAX_WRITERS) {
+               pr_debug("dev not setup\n");
+               ret = -ENXIO;
+               goto out;
+       }
+
+       if (pd->refcnt > 0) {
+               ret = -EBUSY;
+               goto out;
+       }
+       if (!IS_ERR(pd->cdrw.thread))
+               kthread_stop(pd->cdrw.thread);
+
+       pkt_devs[idx] = NULL;
+
+       pkt_debugfs_dev_remove(pd);
+       pkt_sysfs_dev_remove(pd);
+
+       blkdev_put(pd->bdev, FMODE_READ | FMODE_NDELAY);
+
+       remove_proc_entry(pd->name, pkt_proc);
+       pkt_dbg(1, pd, "writer unmapped\n");
+
+       del_gendisk(pd->disk);
+       put_disk(pd->disk);
+
+       mempool_exit(&pd->rb_pool);
+       kfree(pd);
+
+       /* This is safe: open() is still holding a reference. */
+       module_put(THIS_MODULE);
+
+out:
+       mutex_unlock(&ctl_mutex);
+       return ret;
+}
+
+static void pkt_get_status(struct pkt_ctrl_command *ctrl_cmd)
+{
+       struct pktcdvd_device *pd;
+
+       mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
+
+       pd = pkt_find_dev_from_minor(ctrl_cmd->dev_index);
+       if (pd) {
+               ctrl_cmd->dev = new_encode_dev(pd->bdev->bd_dev);
+               ctrl_cmd->pkt_dev = new_encode_dev(pd->pkt_dev);
+       } else {
+               ctrl_cmd->dev = 0;
+               ctrl_cmd->pkt_dev = 0;
+       }
+       ctrl_cmd->num_devices = MAX_WRITERS;
+
+       mutex_unlock(&ctl_mutex);
+}
+
+static long pkt_ctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+       void __user *argp = (void __user *)arg;
+       struct pkt_ctrl_command ctrl_cmd;
+       int ret = 0;
+       dev_t pkt_dev = 0;
+
+       if (cmd != PACKET_CTRL_CMD)
+               return -ENOTTY;
+
+       if (copy_from_user(&ctrl_cmd, argp, sizeof(struct pkt_ctrl_command)))
+               return -EFAULT;
+
+       switch (ctrl_cmd.command) {
+       case PKT_CTRL_CMD_SETUP:
+               if (!capable(CAP_SYS_ADMIN))
+                       return -EPERM;
+               ret = pkt_setup_dev(new_decode_dev(ctrl_cmd.dev), &pkt_dev);
+               ctrl_cmd.pkt_dev = new_encode_dev(pkt_dev);
+               break;
+       case PKT_CTRL_CMD_TEARDOWN:
+               if (!capable(CAP_SYS_ADMIN))
+                       return -EPERM;
+               ret = pkt_remove_dev(new_decode_dev(ctrl_cmd.pkt_dev));
+               break;
+       case PKT_CTRL_CMD_STATUS:
+               pkt_get_status(&ctrl_cmd);
+               break;
+       default:
+               return -ENOTTY;
+       }
+
+       if (copy_to_user(argp, &ctrl_cmd, sizeof(struct pkt_ctrl_command)))
+               return -EFAULT;
+       return ret;
+}
+
+#ifdef CONFIG_COMPAT
+static long pkt_ctl_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+       return pkt_ctl_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
+}
+#endif
+
+static const struct file_operations pkt_ctl_fops = {
+       .open           = nonseekable_open,
+       .unlocked_ioctl = pkt_ctl_ioctl,
+#ifdef CONFIG_COMPAT
+       .compat_ioctl   = pkt_ctl_compat_ioctl,
+#endif
+       .owner          = THIS_MODULE,
+       .llseek         = no_llseek,
+};
+
+static struct miscdevice pkt_misc = {
+       .minor          = MISC_DYNAMIC_MINOR,
+       .name           = DRIVER_NAME,
+       .nodename       = "pktcdvd/control",
+       .fops           = &pkt_ctl_fops
+};
+
+static int __init pkt_init(void)
+{
+       int ret;
+
+       mutex_init(&ctl_mutex);
+
+       ret = mempool_init_kmalloc_pool(&psd_pool, PSD_POOL_SIZE,
+                                   sizeof(struct packet_stacked_data));
+       if (ret)
+               return ret;
+       ret = bioset_init(&pkt_bio_set, BIO_POOL_SIZE, 0, 0);
+       if (ret) {
+               mempool_exit(&psd_pool);
+               return ret;
+       }
+
+       ret = register_blkdev(pktdev_major, DRIVER_NAME);
+       if (ret < 0) {
+               pr_err("unable to register block device\n");
+               goto out2;
+       }
+       if (!pktdev_major)
+               pktdev_major = ret;
+
+       ret = pkt_sysfs_init();
+       if (ret)
+               goto out;
+
+       pkt_debugfs_init();
+
+       ret = misc_register(&pkt_misc);
+       if (ret) {
+               pr_err("unable to register misc device\n");
+               goto out_misc;
+       }
+
+       pkt_proc = proc_mkdir("driver/"DRIVER_NAME, NULL);
+
+       return 0;
+
+out_misc:
+       pkt_debugfs_cleanup();
+       pkt_sysfs_cleanup();
+out:
+       unregister_blkdev(pktdev_major, DRIVER_NAME);
+out2:
+       mempool_exit(&psd_pool);
+       bioset_exit(&pkt_bio_set);
+       return ret;
+}
+
+static void __exit pkt_exit(void)
+{
+       remove_proc_entry("driver/"DRIVER_NAME, NULL);
+       misc_deregister(&pkt_misc);
+
+       pkt_debugfs_cleanup();
+       pkt_sysfs_cleanup();
+
+       unregister_blkdev(pktdev_major, DRIVER_NAME);
+       mempool_exit(&psd_pool);
+       bioset_exit(&pkt_bio_set);
+}
+
+MODULE_DESCRIPTION("Packet writing layer for CD/DVD drives");
+MODULE_AUTHOR("Jens Axboe <axboe@suse.de>");
+MODULE_LICENSE("GPL");
+
+module_init(pkt_init);
+module_exit(pkt_exit);
index c76e0148eada3302a04b81a886fa7cdb34df2de1..574e470b220b09bf3e032f14a60998cfe5d833ce 100644 (file)
@@ -587,6 +587,8 @@ static void ps3vram_submit_bio(struct bio *bio)
        dev_dbg(&dev->core, "%s\n", __func__);
 
        bio = bio_split_to_limits(bio);
+       if (!bio)
+               return;
 
        spin_lock_irq(&priv->lock);
        busy = !bio_list_empty(&priv->list);
index 78334da74d8bf90dcfd1f51d5ccd26f71a289a0c..5eb8c7855970d1ab277798056b0fb3db2ba151be 100644 (file)
@@ -1440,7 +1440,7 @@ static struct rnbd_clt_dev *init_dev(struct rnbd_clt_session *sess,
                goto out_alloc;
        }
 
-       ret = ida_alloc_max(&index_ida, 1 << (MINORBITS - RNBD_PART_BITS),
+       ret = ida_alloc_max(&index_ida, (1 << (MINORBITS - RNBD_PART_BITS)) - 1,
                            GFP_KERNEL);
        if (ret < 0) {
                pr_err("Failed to initialize device '%s' from session %s, allocating idr failed, err: %d\n",
index e9de9d846b730359623d5b451015c169937f9685..17b677b5d3b221fac658268958730dbc71dade52 100644 (file)
@@ -1992,6 +1992,9 @@ static int ublk_ctrl_uring_cmd(struct io_uring_cmd *cmd,
        struct ublksrv_ctrl_cmd *header = (struct ublksrv_ctrl_cmd *)cmd->cmd;
        int ret = -EINVAL;
 
+       if (issue_flags & IO_URING_F_NONBLOCK)
+               return -EAGAIN;
+
        ublk_ctrl_cmd_dump(cmd);
 
        if (!(issue_flags & IO_URING_F_SQE128))
index 68bd2f7961b3f5ad7d64843f2fbc659984cd3c92..6a77fa9174288089f803e3a698c0382e2d128381 100644 (file)
@@ -315,22 +315,35 @@ static void virtio_commit_rqs(struct blk_mq_hw_ctx *hctx)
                virtqueue_notify(vq->vq);
 }
 
+static blk_status_t virtblk_fail_to_queue(struct request *req, int rc)
+{
+       virtblk_cleanup_cmd(req);
+       switch (rc) {
+       case -ENOSPC:
+               return BLK_STS_DEV_RESOURCE;
+       case -ENOMEM:
+               return BLK_STS_RESOURCE;
+       default:
+               return BLK_STS_IOERR;
+       }
+}
+
 static blk_status_t virtblk_prep_rq(struct blk_mq_hw_ctx *hctx,
                                        struct virtio_blk *vblk,
                                        struct request *req,
                                        struct virtblk_req *vbr)
 {
        blk_status_t status;
+       int num;
 
        status = virtblk_setup_cmd(vblk->vdev, req, vbr);
        if (unlikely(status))
                return status;
 
-       vbr->sg_table.nents = virtblk_map_data(hctx, req, vbr);
-       if (unlikely(vbr->sg_table.nents < 0)) {
-               virtblk_cleanup_cmd(req);
-               return BLK_STS_RESOURCE;
-       }
+       num = virtblk_map_data(hctx, req, vbr);
+       if (unlikely(num < 0))
+               return virtblk_fail_to_queue(req, -ENOMEM);
+       vbr->sg_table.nents = num;
 
        blk_mq_start_request(req);
 
@@ -364,15 +377,7 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
                        blk_mq_stop_hw_queue(hctx);
                spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
                virtblk_unmap_data(req, vbr);
-               virtblk_cleanup_cmd(req);
-               switch (err) {
-               case -ENOSPC:
-                       return BLK_STS_DEV_RESOURCE;
-               case -ENOMEM:
-                       return BLK_STS_RESOURCE;
-               default:
-                       return BLK_STS_IOERR;
-               }
+               return virtblk_fail_to_queue(req, err);
        }
 
        if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq))
@@ -991,7 +996,7 @@ static int virtblk_probe(struct virtio_device *vdev)
        blk_queue_max_segments(q, sg_elems);
 
        /* No real sector limit. */
-       blk_queue_max_hw_sectors(q, -1U);
+       blk_queue_max_hw_sectors(q, UINT_MAX);
 
        max_size = virtio_max_dma_size(vdev);
 
index c0227dfa468879cb3af300caba282b620f3ac824..4807af1d58059394d7a992335dabaf2bc3901721 100644 (file)
@@ -524,7 +524,7 @@ static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle,
        return 0;
 }
 
-static int xen_blkbk_remove(struct xenbus_device *dev)
+static void xen_blkbk_remove(struct xenbus_device *dev)
 {
        struct backend_info *be = dev_get_drvdata(&dev->dev);
 
@@ -547,8 +547,6 @@ static int xen_blkbk_remove(struct xenbus_device *dev)
                /* Put the reference we set in xen_blkif_alloc(). */
                xen_blkif_put(be->blkif);
        }
-
-       return 0;
 }
 
 int xen_blkbk_flush_diskcache(struct xenbus_transaction xbt,
index b28489290323faeeb74302570d4a971493c92c14..23ed258b57f0e585e122a60d6d4dba7cc2972f02 100644 (file)
@@ -2467,7 +2467,7 @@ static void blkback_changed(struct xenbus_device *dev,
        }
 }
 
-static int blkfront_remove(struct xenbus_device *xbdev)
+static void blkfront_remove(struct xenbus_device *xbdev)
 {
        struct blkfront_info *info = dev_get_drvdata(&xbdev->dev);
 
@@ -2488,7 +2488,6 @@ static int blkfront_remove(struct xenbus_device *xbdev)
        }
 
        kfree(info);
-       return 0;
 }
 
 static int blkfront_is_ready(struct xenbus_device *dev)
index 6eddc23e49d9a7dc30eb24d6614e0402baa6bd91..bbe9cf1cae27f6088cea5cdd5cd71983fdded295 100644 (file)
@@ -2164,10 +2164,17 @@ static void qca_serdev_shutdown(struct device *dev)
        int timeout = msecs_to_jiffies(CMD_TRANS_TIMEOUT_MS);
        struct serdev_device *serdev = to_serdev_device(dev);
        struct qca_serdev *qcadev = serdev_device_get_drvdata(serdev);
+       struct hci_uart *hu = &qcadev->serdev_hu;
+       struct hci_dev *hdev = hu->hdev;
+       struct qca_data *qca = hu->priv;
        const u8 ibs_wake_cmd[] = { 0xFD };
        const u8 edl_reset_soc_cmd[] = { 0x01, 0x00, 0xFC, 0x01, 0x05 };
 
        if (qcadev->btsoc_type == QCA_QCA6390) {
+               if (test_bit(QCA_BT_OFF, &qca->flags) ||
+                   !test_bit(HCI_RUNNING, &hdev->flags))
+                       return;
+
                serdev_device_write_flush(serdev);
                ret = serdev_device_write_buf(serdev, ibs_wake_cmd,
                                              sizeof(ibs_wake_cmd));
index 3aa91aed3bf733a4876e5d36c8badf13150a771b..226e87b85116ea700efa5db36d89567cf89a97d0 100644 (file)
@@ -857,7 +857,13 @@ static int __init sunxi_rsb_init(void)
                return ret;
        }
 
-       return platform_driver_register(&sunxi_rsb_driver);
+       ret = platform_driver_register(&sunxi_rsb_driver);
+       if (ret) {
+               bus_unregister(&sunxi_rsb_bus);
+               return ret;
+       }
+
+       return 0;
 }
 module_init(sunxi_rsb_init);
 
index d69905233aff2da31bfd55d6f53a62e5a5cb90b4..7e513b7718320c0392385a7d322e97b05e868f11 100644 (file)
@@ -412,7 +412,9 @@ int tpm_pm_suspend(struct device *dev)
        }
 
 suspended:
-       return rc;
+       if (rc)
+               dev_err(dev, "Ignoring error %d while suspending\n", rc);
+       return 0;
 }
 EXPORT_SYMBOL_GPL(tpm_pm_suspend);
 
index 3792918262617983cd21d424a736955c395400b8..80cca3b83b226aa10918fc6e7a7468a43bd91996 100644 (file)
@@ -360,14 +360,13 @@ static int tpmfront_probe(struct xenbus_device *dev,
        return tpm_chip_register(priv->chip);
 }
 
-static int tpmfront_remove(struct xenbus_device *dev)
+static void tpmfront_remove(struct xenbus_device *dev)
 {
        struct tpm_chip *chip = dev_get_drvdata(&dev->dev);
        struct tpm_private *priv = dev_get_drvdata(&chip->dev);
        tpm_chip_unregister(chip);
        ring_free(priv);
        dev_set_drvdata(&chip->dev, NULL);
-       return 0;
 }
 
 static int tpmfront_resume(struct xenbus_device *dev)
index fcfc2e299110eddbb009969af8829bb859a49690..27f3890f471df7a78e7b8e27d0755a295ab8caad 100644 (file)
@@ -58,7 +58,7 @@
 #define PCI1760_CMD_CLR_IMB2           0x00    /* Clears IMB2 */
 #define PCI1760_CMD_SET_DO             0x01    /* Set output state */
 #define PCI1760_CMD_GET_DO             0x02    /* Read output status */
-#define PCI1760_CMD_GET_STATUS         0x03    /* Read current status */
+#define PCI1760_CMD_GET_STATUS         0x07    /* Read current status */
 #define PCI1760_CMD_GET_FW_VER         0x0e    /* Read firmware version */
 #define PCI1760_CMD_GET_HW_VER         0x0f    /* Read hardware version */
 #define PCI1760_CMD_SET_PWM_HI(x)      (0x10 + (x) * 2) /* Set "hi" period */
index 204e39006dda8f102643ecd90eb45fbb3df70218..c17bd845f5fcb8afa9c64662f6a13903015ac815 100644 (file)
@@ -307,6 +307,7 @@ static void amd_pstate_adjust_perf(unsigned int cpu,
                max_perf = min_perf;
 
        amd_pstate_update(cpudata, min_perf, des_perf, max_perf, true);
+       cpufreq_cpu_put(policy);
 }
 
 static int amd_get_min_freq(struct amd_cpudata *cpudata)
index d1801281cdd992106006250a4a395e828e36fcc5..c11d22fd84c3738584144377d2934b83d27c8f6a 100644 (file)
@@ -280,6 +280,7 @@ static int apple_soc_cpufreq_init(struct cpufreq_policy *policy)
        policy->cpuinfo.transition_latency = transition_latency;
        policy->dvfs_possible_from_any_cpu = true;
        policy->fast_switch_possible = true;
+       policy->suspend_freq = freq_table[0].frequency;
 
        if (policy_has_boost_freq(policy)) {
                ret = cpufreq_enable_boost_support();
@@ -321,7 +322,6 @@ static struct cpufreq_driver apple_soc_cpufreq_driver = {
        .flags          = CPUFREQ_HAVE_GOVERNOR_PER_POLICY |
                          CPUFREQ_NEED_INITIAL_FREQ_CHECK | CPUFREQ_IS_COOLING_DEV,
        .verify         = cpufreq_generic_frequency_table_verify,
-       .attr           = cpufreq_generic_attr,
        .get            = apple_soc_cpufreq_get_rate,
        .init           = apple_soc_cpufreq_init,
        .exit           = apple_soc_cpufreq_exit,
@@ -329,6 +329,7 @@ static struct cpufreq_driver apple_soc_cpufreq_driver = {
        .fast_switch    = apple_soc_cpufreq_fast_switch,
        .register_em    = cpufreq_register_em_with_opp,
        .attr           = apple_soc_cpufreq_hw_attr,
+       .suspend        = cpufreq_generic_suspend,
 };
 
 static int __init apple_soc_cpufreq_module_init(void)
index c10fc33b29b181aa725c202ca734b95227836533..b74289a95a17178b7e2177e1bf3ad0b12bb307b0 100644 (file)
@@ -445,7 +445,7 @@ static int __init armada37xx_cpufreq_driver_init(void)
                return -ENODEV;
        }
 
-       clk = clk_get(cpu_dev, 0);
+       clk = clk_get(cpu_dev, NULL);
        if (IS_ERR(clk)) {
                dev_err(cpu_dev, "Cannot get clock for CPU0\n");
                return PTR_ERR(clk);
index 432dfb4e8027e96b908c420cd40f0d0eb911bfef..022e3555407c867bd4d0d2508571fedd0f22a0f7 100644 (file)
@@ -487,7 +487,8 @@ static unsigned int get_perf_level_count(struct cpufreq_policy *policy)
        cpu_data = policy->driver_data;
        perf_caps = &cpu_data->perf_caps;
        max_cap = arch_scale_cpu_capacity(cpu);
-       min_cap = div_u64(max_cap * perf_caps->lowest_perf, perf_caps->highest_perf);
+       min_cap = div_u64((u64)max_cap * perf_caps->lowest_perf,
+                         perf_caps->highest_perf);
        if ((min_cap == 0) || (max_cap < min_cap))
                return 0;
        return 1 + max_cap / CPPC_EM_CAP_STEP - min_cap / CPPC_EM_CAP_STEP;
@@ -519,10 +520,10 @@ static int cppc_get_cpu_power(struct device *cpu_dev,
        cpu_data = policy->driver_data;
        perf_caps = &cpu_data->perf_caps;
        max_cap = arch_scale_cpu_capacity(cpu_dev->id);
-       min_cap = div_u64(max_cap * perf_caps->lowest_perf,
-                       perf_caps->highest_perf);
-
-       perf_step = CPPC_EM_CAP_STEP * perf_caps->highest_perf / max_cap;
+       min_cap = div_u64((u64)max_cap * perf_caps->lowest_perf,
+                         perf_caps->highest_perf);
+       perf_step = div_u64((u64)CPPC_EM_CAP_STEP * perf_caps->highest_perf,
+                           max_cap);
        min_step = min_cap / CPPC_EM_CAP_STEP;
        max_step = max_cap / CPPC_EM_CAP_STEP;
 
index 8ab67288304369d5c3e644844a412bdd7fd8b629..e8570365109869d4063339d877b3c3110b75b9b3 100644 (file)
@@ -137,6 +137,7 @@ static const struct of_device_id blocklist[] __initconst = {
        { .compatible = "nvidia,tegra30", },
        { .compatible = "nvidia,tegra124", },
        { .compatible = "nvidia,tegra210", },
+       { .compatible = "nvidia,tegra234", },
 
        { .compatible = "qcom,apq8096", },
        { .compatible = "qcom,msm8996", },
@@ -150,6 +151,7 @@ static const struct of_device_id blocklist[] __initconst = {
        { .compatible = "qcom,sdm845", },
        { .compatible = "qcom,sm6115", },
        { .compatible = "qcom,sm6350", },
+       { .compatible = "qcom,sm6375", },
        { .compatible = "qcom,sm8150", },
        { .compatible = "qcom,sm8250", },
        { .compatible = "qcom,sm8350", },
index 340fed35e45dde64081837df046ae729459c9745..9505a812d6a1d230f1fdf23b736a3f910268990a 100644 (file)
@@ -649,9 +649,10 @@ static int qcom_cpufreq_hw_driver_probe(struct platform_device *pdev)
 {
        struct clk_hw_onecell_data *clk_data;
        struct device *dev = &pdev->dev;
+       struct device_node *soc_node;
        struct device *cpu_dev;
        struct clk *clk;
-       int ret, i, num_domains;
+       int ret, i, num_domains, reg_sz;
 
        clk = clk_get(dev, "xo");
        if (IS_ERR(clk))
@@ -679,7 +680,21 @@ static int qcom_cpufreq_hw_driver_probe(struct platform_device *pdev)
                return ret;
 
        /* Allocate qcom_cpufreq_data based on the available frequency domains in DT */
-       num_domains = of_property_count_elems_of_size(dev->of_node, "reg", sizeof(u32) * 4);
+       soc_node = of_get_parent(dev->of_node);
+       if (!soc_node)
+               return -EINVAL;
+
+       ret = of_property_read_u32(soc_node, "#address-cells", &reg_sz);
+       if (ret)
+               goto of_exit;
+
+       ret = of_property_read_u32(soc_node, "#size-cells", &i);
+       if (ret)
+               goto of_exit;
+
+       reg_sz += i;
+
+       num_domains = of_property_count_elems_of_size(dev->of_node, "reg", sizeof(u32) * reg_sz);
        if (num_domains <= 0)
                return num_domains;
 
@@ -743,6 +758,9 @@ static int qcom_cpufreq_hw_driver_probe(struct platform_device *pdev)
        else
                dev_dbg(dev, "QCOM CPUFreq HW driver initialized\n");
 
+of_exit:
+       of_node_put(soc_node);
+
        return ret;
 }
 
index 53100fb9b07bdb9fe360916f93d76c46c46cb4cc..12205e2b53b4574851ced620eeda4f4c174b231b 100644 (file)
@@ -3,7 +3,7 @@
  * Microchip / Atmel ECC (I2C) driver.
  *
  * Copyright (c) 2017, Microchip Technology Inc.
- * Author: Tudor Ambarus <tudor.ambarus@microchip.com>
+ * Author: Tudor Ambarus
  */
 
 #include <linux/delay.h>
@@ -411,6 +411,6 @@ static void __exit atmel_ecc_exit(void)
 module_init(atmel_ecc_init);
 module_exit(atmel_ecc_exit);
 
-MODULE_AUTHOR("Tudor Ambarus <tudor.ambarus@microchip.com>");
+MODULE_AUTHOR("Tudor Ambarus");
 MODULE_DESCRIPTION("Microchip / Atmel ECC (I2C) driver");
 MODULE_LICENSE("GPL v2");
index 81ce09bedda8f4348ba25ca8a676a92f83d2c96b..55bff1e13142606e650635f1b5345e75f16b061f 100644 (file)
@@ -3,7 +3,7 @@
  * Microchip / Atmel ECC (I2C) driver.
  *
  * Copyright (c) 2017, Microchip Technology Inc.
- * Author: Tudor Ambarus <tudor.ambarus@microchip.com>
+ * Author: Tudor Ambarus
  */
 
 #include <linux/bitrev.h>
@@ -390,6 +390,6 @@ static void __exit atmel_i2c_exit(void)
 module_init(atmel_i2c_init);
 module_exit(atmel_i2c_exit);
 
-MODULE_AUTHOR("Tudor Ambarus <tudor.ambarus@microchip.com>");
+MODULE_AUTHOR("Tudor Ambarus");
 MODULE_DESCRIPTION("Microchip / Atmel ECC (I2C) driver");
 MODULE_LICENSE("GPL v2");
index 48929efe2a5bf20112c1e0cc2ec178b7fb1ad107..35f7857a7f7cc1c3fff0866a83dbfa5663df6086 100644 (file)
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Copyright (c) 2017, Microchip Technology Inc.
- * Author: Tudor Ambarus <tudor.ambarus@microchip.com>
+ * Author: Tudor Ambarus
  */
 
 #ifndef __ATMEL_I2C_H__
index 1f65df48984783a992d8e6d77dcdd7e6d803ff0d..f46b161d2cda65c03446505f32234bc20ab8057f 100644 (file)
@@ -104,7 +104,7 @@ int caam_process_blob(struct caam_blob_priv *priv,
        }
 
        ctrlpriv = dev_get_drvdata(jrdev->parent);
-       moo = FIELD_GET(CSTA_MOO, ioread32(&ctrlpriv->ctrl->perfmon.status));
+       moo = FIELD_GET(CSTA_MOO, rd_reg32(&ctrlpriv->ctrl->perfmon.status));
        if (moo != CSTA_MOO_SECURE && moo != CSTA_MOO_TRUSTED)
                dev_warn(jrdev,
                         "using insecure test key, enable HAB to use unique device key!\n");
index e553ccadbcbc859a38ce112ca6ef8df2407f10a6..e5876286828b8e53e0dba6fa705090b299dffc66 100644 (file)
@@ -239,7 +239,8 @@ static int virtio_crypto_alg_skcipher_close_session(
                pr_err("virtio_crypto: Close session failed status: %u, session_id: 0x%llx\n",
                        ctrl_status->status, destroy_session->session_id);
 
-               return -EINVAL;
+               err = -EINVAL;
+               goto out;
        }
 
        err = 0;
index f69d68122b9b1313ebc6f94cfb12d7c707cebe2e..fbf725fae7c1c39480d59c2ce5e610fcf5b1874c 100644 (file)
@@ -168,14 +168,11 @@ void dma_buf_uninit_sysfs_statistics(void)
        kset_unregister(dma_buf_stats_kset);
 }
 
-int dma_buf_stats_setup(struct dma_buf *dmabuf)
+int dma_buf_stats_setup(struct dma_buf *dmabuf, struct file *file)
 {
        struct dma_buf_sysfs_entry *sysfs_entry;
        int ret;
 
-       if (!dmabuf || !dmabuf->file)
-               return -EINVAL;
-
        if (!dmabuf->exp_name) {
                pr_err("exporter name must not be empty if stats needed\n");
                return -EINVAL;
@@ -192,7 +189,7 @@ int dma_buf_stats_setup(struct dma_buf *dmabuf)
 
        /* create the directory for buffer stats */
        ret = kobject_init_and_add(&sysfs_entry->kobj, &dma_buf_ktype, NULL,
-                                  "%lu", file_inode(dmabuf->file)->i_ino);
+                                  "%lu", file_inode(file)->i_ino);
        if (ret)
                goto err_sysfs_dmabuf;
 
index a49c6e2650cccddb5eddf639bca14df89a36050b..7a8a995b75bae10621a783f583d0f46a1feb792c 100644 (file)
@@ -13,7 +13,7 @@
 int dma_buf_init_sysfs_statistics(void);
 void dma_buf_uninit_sysfs_statistics(void);
 
-int dma_buf_stats_setup(struct dma_buf *dmabuf);
+int dma_buf_stats_setup(struct dma_buf *dmabuf, struct file *file);
 
 void dma_buf_stats_teardown(struct dma_buf *dmabuf);
 #else
@@ -25,7 +25,7 @@ static inline int dma_buf_init_sysfs_statistics(void)
 
 static inline void dma_buf_uninit_sysfs_statistics(void) {}
 
-static inline int dma_buf_stats_setup(struct dma_buf *dmabuf)
+static inline int dma_buf_stats_setup(struct dma_buf *dmabuf, struct file *file)
 {
        return 0;
 }
index b6c36914e7c611c848f51afcfa57725657c372eb..e6528767efc7c340a51d6aa9e8bd05b6c3615936 100644 (file)
@@ -95,10 +95,11 @@ static int dma_buf_file_release(struct inode *inode, struct file *file)
                return -EINVAL;
 
        dmabuf = file->private_data;
-
-       mutex_lock(&db_list.lock);
-       list_del(&dmabuf->list_node);
-       mutex_unlock(&db_list.lock);
+       if (dmabuf) {
+               mutex_lock(&db_list.lock);
+               list_del(&dmabuf->list_node);
+               mutex_unlock(&db_list.lock);
+       }
 
        return 0;
 }
@@ -528,17 +529,17 @@ static inline int is_dma_buf_file(struct file *file)
        return file->f_op == &dma_buf_fops;
 }
 
-static struct file *dma_buf_getfile(struct dma_buf *dmabuf, int flags)
+static struct file *dma_buf_getfile(size_t size, int flags)
 {
        static atomic64_t dmabuf_inode = ATOMIC64_INIT(0);
-       struct file *file;
        struct inode *inode = alloc_anon_inode(dma_buf_mnt->mnt_sb);
+       struct file *file;
 
        if (IS_ERR(inode))
                return ERR_CAST(inode);
 
-       inode->i_size = dmabuf->size;
-       inode_set_bytes(inode, dmabuf->size);
+       inode->i_size = size;
+       inode_set_bytes(inode, size);
 
        /*
         * The ->i_ino acquired from get_next_ino() is not unique thus
@@ -552,8 +553,6 @@ static struct file *dma_buf_getfile(struct dma_buf *dmabuf, int flags)
                                 flags, &dma_buf_fops);
        if (IS_ERR(file))
                goto err_alloc_file;
-       file->private_data = dmabuf;
-       file->f_path.dentry->d_fsdata = dmabuf;
 
        return file;
 
@@ -619,19 +618,11 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
        size_t alloc_size = sizeof(struct dma_buf);
        int ret;
 
-       if (!exp_info->resv)
-               alloc_size += sizeof(struct dma_resv);
-       else
-               /* prevent &dma_buf[1] == dma_buf->resv */
-               alloc_size += 1;
-
-       if (WARN_ON(!exp_info->priv
-                         || !exp_info->ops
-                         || !exp_info->ops->map_dma_buf
-                         || !exp_info->ops->unmap_dma_buf
-                         || !exp_info->ops->release)) {
+       if (WARN_ON(!exp_info->priv || !exp_info->ops
+                   || !exp_info->ops->map_dma_buf
+                   || !exp_info->ops->unmap_dma_buf
+                   || !exp_info->ops->release))
                return ERR_PTR(-EINVAL);
-       }
 
        if (WARN_ON(exp_info->ops->cache_sgt_mapping &&
                    (exp_info->ops->pin || exp_info->ops->unpin)))
@@ -643,10 +634,21 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
        if (!try_module_get(exp_info->owner))
                return ERR_PTR(-ENOENT);
 
+       file = dma_buf_getfile(exp_info->size, exp_info->flags);
+       if (IS_ERR(file)) {
+               ret = PTR_ERR(file);
+               goto err_module;
+       }
+
+       if (!exp_info->resv)
+               alloc_size += sizeof(struct dma_resv);
+       else
+               /* prevent &dma_buf[1] == dma_buf->resv */
+               alloc_size += 1;
        dmabuf = kzalloc(alloc_size, GFP_KERNEL);
        if (!dmabuf) {
                ret = -ENOMEM;
-               goto err_module;
+               goto err_file;
        }
 
        dmabuf->priv = exp_info->priv;
@@ -658,43 +660,35 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
        init_waitqueue_head(&dmabuf->poll);
        dmabuf->cb_in.poll = dmabuf->cb_out.poll = &dmabuf->poll;
        dmabuf->cb_in.active = dmabuf->cb_out.active = 0;
+       INIT_LIST_HEAD(&dmabuf->attachments);
 
        if (!resv) {
-               resv = (struct dma_resv *)&dmabuf[1];
-               dma_resv_init(resv);
+               dmabuf->resv = (struct dma_resv *)&dmabuf[1];
+               dma_resv_init(dmabuf->resv);
+       } else {
+               dmabuf->resv = resv;
        }
-       dmabuf->resv = resv;
 
-       file = dma_buf_getfile(dmabuf, exp_info->flags);
-       if (IS_ERR(file)) {
-               ret = PTR_ERR(file);
+       ret = dma_buf_stats_setup(dmabuf, file);
+       if (ret)
                goto err_dmabuf;
-       }
 
+       file->private_data = dmabuf;
+       file->f_path.dentry->d_fsdata = dmabuf;
        dmabuf->file = file;
 
-       INIT_LIST_HEAD(&dmabuf->attachments);
-
        mutex_lock(&db_list.lock);
        list_add(&dmabuf->list_node, &db_list.head);
        mutex_unlock(&db_list.lock);
 
-       ret = dma_buf_stats_setup(dmabuf);
-       if (ret)
-               goto err_sysfs;
-
        return dmabuf;
 
-err_sysfs:
-       /*
-        * Set file->f_path.dentry->d_fsdata to NULL so that when
-        * dma_buf_release() gets invoked by dentry_ops, it exits
-        * early before calling the release() dma_buf op.
-        */
-       file->f_path.dentry->d_fsdata = NULL;
-       fput(file);
 err_dmabuf:
+       if (!resv)
+               dma_resv_fini(dmabuf->resv);
        kfree(dmabuf);
+err_file:
+       fput(file);
 err_module:
        module_put(exp_info->owner);
        return ERR_PTR(ret);
index c741b6431958c20c5d46eb118fdbd46a42672e7a..8a6e6b60d66f377de23f570288c417f17f19aad2 100644 (file)
@@ -451,7 +451,8 @@ static int dma_chan_get(struct dma_chan *chan)
        /* The channel is already in use, update client count */
        if (chan->client_count) {
                __module_get(owner);
-               goto out;
+               chan->client_count++;
+               return 0;
        }
 
        if (!try_module_get(owner))
@@ -470,11 +471,11 @@ static int dma_chan_get(struct dma_chan *chan)
                        goto err_out;
        }
 
+       chan->client_count++;
+
        if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
                balance_ref_count(chan);
 
-out:
-       chan->client_count++;
        return 0;
 
 err_out:
index a183d93bd7e2986e6a4f385d62395cde2a5af834..bf85aa0979ecb8ecf62b83291c5bdbb19b24c2d7 100644 (file)
@@ -1018,6 +1018,11 @@ static noinline void axi_chan_handle_err(struct axi_dma_chan *chan, u32 status)
 
        /* The bad descriptor currently is in the head of vc list */
        vd = vchan_next_desc(&chan->vc);
+       if (!vd) {
+               dev_err(chan2dev(chan), "BUG: %s, IRQ with no descriptors\n",
+                       axi_chan_name(chan));
+               goto out;
+       }
        /* Remove the completed descriptor from issued list */
        list_del(&vd->node);
 
@@ -1032,6 +1037,7 @@ static noinline void axi_chan_handle_err(struct axi_dma_chan *chan, u32 status)
        /* Try to restart the controller */
        axi_chan_start_first_queued(chan);
 
+out:
        spin_unlock_irqrestore(&chan->vc.lock, flags);
 }
 
index 06f5d3783d7719e01ad896818f5fa48a75cf945e..29dbb0f52e186c4ff5cb9948c3cfb0224b47d486 100644 (file)
@@ -1172,8 +1172,19 @@ static void idxd_flush_pending_descs(struct idxd_irq_entry *ie)
        spin_unlock(&ie->list_lock);
 
        list_for_each_entry_safe(desc, itr, &flist, list) {
+               struct dma_async_tx_descriptor *tx;
+
                list_del(&desc->list);
                ctype = desc->completion->status ? IDXD_COMPLETE_NORMAL : IDXD_COMPLETE_ABORT;
+               /*
+                * wq is being disabled. Any remaining descriptors are
+                * likely to be stuck and can be dropped. callback could
+                * point to code that is no longer accessible, for example
+                * if dmatest module has been unloaded.
+                */
+               tx = &desc->txd;
+               tx->callback = NULL;
+               tx->callback_result = NULL;
                idxd_dma_complete_txd(desc, ctype, true);
        }
 }
@@ -1390,8 +1401,7 @@ err_res_alloc:
 err_irq:
        idxd_wq_unmap_portal(wq);
 err_map_portal:
-       rc = idxd_wq_disable(wq, false);
-       if (rc < 0)
+       if (idxd_wq_disable(wq, false))
                dev_dbg(dev, "wq %s disable failed\n", dev_name(wq_confdev(wq)));
 err:
        return rc;
@@ -1408,11 +1418,11 @@ void drv_disable_wq(struct idxd_wq *wq)
                dev_warn(dev, "Clients has claim on wq %d: %d\n",
                         wq->id, idxd_wq_refcount(wq));
 
-       idxd_wq_free_resources(wq);
        idxd_wq_unmap_portal(wq);
        idxd_wq_drain(wq);
        idxd_wq_free_irq(wq);
        idxd_wq_reset(wq);
+       idxd_wq_free_resources(wq);
        percpu_ref_exit(&wq->wq_active);
        wq->type = IDXD_WQT_NONE;
        wq->client_count = 0;
index fbea5f62dd98b82f09be1c2a23fc36b54fc99db9..b926abe4fa43aca6ef00f6c1ec3405be824e6edb 100644 (file)
@@ -1521,10 +1521,12 @@ static struct sdma_desc *sdma_transfer_init(struct sdma_channel *sdmac,
                sdma_config_ownership(sdmac, false, true, false);
 
        if (sdma_load_context(sdmac))
-               goto err_desc_out;
+               goto err_bd_out;
 
        return desc;
 
+err_bd_out:
+       sdma_free_bd(desc);
 err_desc_out:
        kfree(desc);
 err_out:
index 9b9184f964be397a4f5f5b8a7a1c2410032d6456..1709d159af7e019e04fe015fa38690c55ef71fe4 100644 (file)
@@ -914,7 +914,7 @@ static void ldma_dev_init(struct ldma_dev *d)
        }
 }
 
-static int ldma_cfg_init(struct ldma_dev *d)
+static int ldma_parse_dt(struct ldma_dev *d)
 {
        struct fwnode_handle *fwnode = dev_fwnode(d->dev);
        struct ldma_port *p;
@@ -1661,10 +1661,6 @@ static int intel_ldma_probe(struct platform_device *pdev)
                p->ldev = d;
        }
 
-       ret = ldma_cfg_init(d);
-       if (ret)
-               return ret;
-
        dma_dev->dev = &pdev->dev;
 
        ch_mask = (unsigned long)d->channels_mask;
@@ -1675,6 +1671,10 @@ static int intel_ldma_probe(struct platform_device *pdev)
                        ldma_dma_init_v3X(j, d);
        }
 
+       ret = ldma_parse_dt(d);
+       if (ret)
+               return ret;
+
        dma_dev->device_alloc_chan_resources = ldma_alloc_chan_resources;
        dma_dev->device_free_chan_resources = ldma_free_chan_resources;
        dma_dev->device_terminate_all = ldma_terminate_all;
index 377da23012ac23e826234673c1095f8dc70aefa3..a2bf13ff18b6d2904b489f880c54563c8e12410d 100644 (file)
@@ -71,12 +71,13 @@ static int pt_core_execute_cmd(struct ptdma_desc *desc, struct pt_cmd_queue *cmd
        bool soc = FIELD_GET(DWORD0_SOC, desc->dw0);
        u8 *q_desc = (u8 *)&cmd_q->qbase[cmd_q->qidx];
        u32 tail;
+       unsigned long flags;
 
        if (soc) {
                desc->dw0 |= FIELD_PREP(DWORD0_IOC, desc->dw0);
                desc->dw0 &= ~DWORD0_SOC;
        }
-       mutex_lock(&cmd_q->q_mutex);
+       spin_lock_irqsave(&cmd_q->q_lock, flags);
 
        /* Copy 32-byte command descriptor to hw queue. */
        memcpy(q_desc, desc, 32);
@@ -91,7 +92,7 @@ static int pt_core_execute_cmd(struct ptdma_desc *desc, struct pt_cmd_queue *cmd
 
        /* Turn the queue back on using our cached control register */
        pt_start_queue(cmd_q);
-       mutex_unlock(&cmd_q->q_mutex);
+       spin_unlock_irqrestore(&cmd_q->q_lock, flags);
 
        return 0;
 }
@@ -199,7 +200,7 @@ int pt_core_init(struct pt_device *pt)
 
        cmd_q->pt = pt;
        cmd_q->dma_pool = dma_pool;
-       mutex_init(&cmd_q->q_mutex);
+       spin_lock_init(&cmd_q->q_lock);
 
        /* Page alignment satisfies our needs for N <= 128 */
        cmd_q->qsize = Q_SIZE(Q_DESC_SIZE);
index d093c43b7d1343b11227e14b815149085a32696f..21b4bf895200b5512ca91a54a19e2ce93623d46e 100644 (file)
@@ -196,7 +196,7 @@ struct pt_cmd_queue {
        struct ptdma_desc *qbase;
 
        /* Aligned queue start address (per requirement) */
-       struct mutex q_mutex ____cacheline_aligned;
+       spinlock_t q_lock ____cacheline_aligned;
        unsigned int qidx;
 
        unsigned int qsize;
index 061add83229512e07f6ed90701789e395f02522d..59a36cbf9b5f7f146d3d98b1d0f0eb127eede8e3 100644 (file)
@@ -1756,6 +1756,7 @@ static int gpi_create_spi_tre(struct gchan *chan, struct gpi_desc *desc,
                tre->dword[3] = u32_encode_bits(TRE_TYPE_GO, TRE_FLAGS_TYPE);
                if (spi->cmd == SPI_RX) {
                        tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_IEOB);
+                       tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_LINK);
                } else if (spi->cmd == SPI_TX) {
                        tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_CHAIN);
                } else { /* SPI_DUPLEX */
index 1d1180db6d4ecdca4e540da718fcf733aa42438f..8f67f453a4922c2434cbfc84d378f84db7d45557 100644 (file)
@@ -711,6 +711,7 @@ static int tegra_dma_terminate_all(struct dma_chan *dc)
                        return err;
                }
 
+               vchan_terminate_vdesc(&tdc->dma_desc->vd);
                tegra_dma_disable(tdc);
                tdc->dma_desc = NULL;
        }
index ae39b52012b2fb74e723637652b2e07ea718defe..79da93cc77b64b0f0893332f92448a53950db0f1 100644 (file)
@@ -221,7 +221,7 @@ static int tegra_adma_init(struct tegra_adma *tdma)
        int ret;
 
        /* Clear any interrupts */
-       tdma_write(tdma, tdma->cdata->global_int_clear, 0x1);
+       tdma_write(tdma, tdma->cdata->ch_base_offset + tdma->cdata->global_int_clear, 0x1);
 
        /* Assert soft reset */
        tdma_write(tdma, ADMA_GLOBAL_SOFT_RESET, 0x1);
index ce8b80bb34d7d1f2f0d1c537d776cda691590193..4c62274e0b33264e7246cff5468eadcc4f948692 100644 (file)
@@ -762,11 +762,12 @@ static void udma_decrement_byte_counters(struct udma_chan *uc, u32 val)
        if (uc->desc->dir == DMA_DEV_TO_MEM) {
                udma_rchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val);
                udma_rchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val);
-               udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val);
+               if (uc->config.ep_type != PSIL_EP_NATIVE)
+                       udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val);
        } else {
                udma_tchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val);
                udma_tchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val);
-               if (!uc->bchan)
+               if (!uc->bchan && uc->config.ep_type != PSIL_EP_NATIVE)
                        udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val);
        }
 }
index a8d23cdf883e5b825a3a66576903b1043280deae..ac09f0e5f58d88929a7dd9452b9044ce82e083d3 100644 (file)
@@ -3143,8 +3143,10 @@ static int xilinx_dma_probe(struct platform_device *pdev)
        /* Initialize the channels */
        for_each_child_of_node(node, child) {
                err = xilinx_dma_child_probe(xdev, child);
-               if (err < 0)
+               if (err < 0) {
+                       of_node_put(child);
                        goto error;
+               }
        }
 
        if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
index 19522c568aa5dea572f5d2c34eac44905ed1f142..878deb4880cdb4c3998f930cc69ffdffe9035b6d 100644 (file)
@@ -394,17 +394,16 @@ static void edac_device_workq_teardown(struct edac_device_ctl_info *edac_dev)
  *     Then restart the workq on the new delay
  */
 void edac_device_reset_delay_period(struct edac_device_ctl_info *edac_dev,
-                                       unsigned long value)
+                                   unsigned long msec)
 {
-       unsigned long jiffs = msecs_to_jiffies(value);
-
-       if (value == 1000)
-               jiffs = round_jiffies_relative(value);
-
-       edac_dev->poll_msec = value;
-       edac_dev->delay     = jiffs;
+       edac_dev->poll_msec = msec;
+       edac_dev->delay     = msecs_to_jiffies(msec);
 
-       edac_mod_work(&edac_dev->work, jiffs);
+       /* See comment in edac_device_workq_setup() above */
+       if (edac_dev->poll_msec == 1000)
+               edac_mod_work(&edac_dev->work, round_jiffies_relative(edac_dev->delay));
+       else
+               edac_mod_work(&edac_dev->work, edac_dev->delay);
 }
 
 int edac_device_alloc_index(void)
index 763c076d96f21d368931e9d6cc68e10ab0a29ea3..47593afdc2348ff71a6600813cca8c11d01a1010 100644 (file)
@@ -53,7 +53,7 @@ bool edac_stop_work(struct delayed_work *work);
 bool edac_mod_work(struct delayed_work *work, unsigned long delay);
 
 extern void edac_device_reset_delay_period(struct edac_device_ctl_info
-                                          *edac_dev, unsigned long value);
+                                          *edac_dev, unsigned long msec);
 extern void edac_mc_reset_delay_period(unsigned long value);
 
 /*
index 61b76ec226af11dcefb8a8c91585295d37fcc610..19fba258ae108876773bd9563ff4f21038e170be 100644 (file)
@@ -174,8 +174,10 @@ static int highbank_mc_probe(struct platform_device *pdev)
        drvdata = mci->pvt_info;
        platform_set_drvdata(pdev, mci);
 
-       if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL))
-               return -ENOMEM;
+       if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL)) {
+               res = -ENOMEM;
+               goto free;
+       }
 
        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!r) {
@@ -243,6 +245,7 @@ err2:
        edac_mc_del_mc(&pdev->dev);
 err:
        devres_release_group(&pdev->dev, NULL);
+free:
        edac_mc_free(mci);
        return res;
 }
index f818d00bb2c69e700dc0ce5e70f1cf84df55953e..ffdad59ec81fcb5e438061aba98accede1cf7d5a 100644 (file)
@@ -910,6 +910,8 @@ static int do_xfer(const struct scmi_protocol_handle *ph,
                              xfer->hdr.protocol_id, xfer->hdr.seq,
                              xfer->hdr.poll_completion);
 
+       /* Clear any stale status */
+       xfer->hdr.status = SCMI_SUCCESS;
        xfer->state = SCMI_XFER_SENT_OK;
        /*
         * Even though spinlocking is not needed here since no race is possible
index 1dfe534b85184528c31e849668bd38edd1bf4154..87b4f4d35f06230bc161fc4205c7b199e03c0015 100644 (file)
@@ -81,10 +81,11 @@ u32 shmem_read_header(struct scmi_shared_mem __iomem *shmem)
 void shmem_fetch_response(struct scmi_shared_mem __iomem *shmem,
                          struct scmi_xfer *xfer)
 {
+       size_t len = ioread32(&shmem->length);
+
        xfer->hdr.status = ioread32(shmem->msg_payload);
        /* Skip the length of header and status in shmem area i.e 8 bytes */
-       xfer->rx.len = min_t(size_t, xfer->rx.len,
-                            ioread32(&shmem->length) - 8);
+       xfer->rx.len = min_t(size_t, xfer->rx.len, len > 8 ? len - 8 : 0);
 
        /* Take a copy to the rx buffer.. */
        memcpy_fromio(xfer->rx.buf, shmem->msg_payload + 4, xfer->rx.len);
@@ -93,8 +94,10 @@ void shmem_fetch_response(struct scmi_shared_mem __iomem *shmem,
 void shmem_fetch_notification(struct scmi_shared_mem __iomem *shmem,
                              size_t max_len, struct scmi_xfer *xfer)
 {
+       size_t len = ioread32(&shmem->length);
+
        /* Skip only the length of header in shmem area i.e 4 bytes */
-       xfer->rx.len = min_t(size_t, max_len, ioread32(&shmem->length) - 4);
+       xfer->rx.len = min_t(size_t, max_len, len > 4 ? len - 4 : 0);
 
        /* Take a copy to the rx buffer.. */
        memcpy_fromio(xfer->rx.buf, shmem->msg_payload, xfer->rx.len);
index 33c9b81a55cd11926dad6426cd2e748052150f40..1db975c08896983f994be90fea6a361cc3099389 100644 (file)
@@ -160,7 +160,6 @@ static void scmi_vio_channel_cleanup_sync(struct scmi_vio_channel *vioch)
        }
 
        vioch->shutdown_done = &vioch_shutdown_done;
-       virtio_break_device(vioch->vqueue->vdev);
        if (!vioch->is_rx && vioch->deferred_tx_wq)
                /* Cannot be kicked anymore after this...*/
                vioch->deferred_tx_wq = NULL;
@@ -482,6 +481,12 @@ static int virtio_chan_free(int id, void *p, void *data)
        struct scmi_chan_info *cinfo = p;
        struct scmi_vio_channel *vioch = cinfo->transport_info;
 
+       /*
+        * Break device to inhibit further traffic flowing while shutting down
+        * the channels: doing it later holding vioch->lock creates unsafe
+        * locking dependency chains as reported by LOCKDEP.
+        */
+       virtio_break_device(vioch->vqueue->vdev);
        scmi_vio_channel_cleanup_sync(vioch);
 
        scmi_free_channel(cinfo, data, id);
index 09716eebe8ac3adb2d395fa037284d9971e394bc..a2b0cbc8741c2ff1cf175c4d70a7eba6b7d37050 100644 (file)
@@ -394,8 +394,8 @@ static int __init efisubsys_init(void)
        efi_kobj = kobject_create_and_add("efi", firmware_kobj);
        if (!efi_kobj) {
                pr_err("efi: Firmware registration failed.\n");
-               destroy_workqueue(efi_rts_wq);
-               return -ENOMEM;
+               error = -ENOMEM;
+               goto err_destroy_wq;
        }
 
        if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE |
@@ -443,7 +443,10 @@ err_unregister:
 err_put:
        kobject_put(efi_kobj);
        efi_kobj = NULL;
-       destroy_workqueue(efi_rts_wq);
+err_destroy_wq:
+       if (efi_rts_wq)
+               destroy_workqueue(efi_rts_wq);
+
        return error;
 }
 
index 7feee3d9c2bfbeec307612ffdd3373f607cc1b62..1fba4e09cdcff8f02c8daaaff8127da66bbd8942 100644 (file)
@@ -62,6 +62,7 @@ struct efi_runtime_work efi_rts_work;
                                                                        \
        if (!efi_enabled(EFI_RUNTIME_SERVICES)) {                       \
                pr_warn_once("EFI Runtime Services are disabled!\n");   \
+               efi_rts_work.status = EFI_DEVICE_ERROR;                 \
                goto exit;                                              \
        }                                                               \
                                                                        \
index 2652c396c42368e2342bfca6814a9117ed2b745f..33ae94745aef976f376bab93cbed59024c2a905f 100644 (file)
@@ -93,14 +93,19 @@ static int coreboot_table_populate(struct device *dev, void *ptr)
        for (i = 0; i < header->table_entries; i++) {
                entry = ptr_entry;
 
-               device = kzalloc(sizeof(struct device) + entry->size, GFP_KERNEL);
+               if (entry->size < sizeof(*entry)) {
+                       dev_warn(dev, "coreboot table entry too small!\n");
+                       return -EINVAL;
+               }
+
+               device = kzalloc(sizeof(device->dev) + entry->size, GFP_KERNEL);
                if (!device)
                        return -ENOMEM;
 
                device->dev.parent = dev;
                device->dev.bus = &coreboot_bus_type;
                device->dev.release = coreboot_device_release;
-               memcpy(&device->entry, ptr_entry, entry->size);
+               memcpy(device->raw, ptr_entry, entry->size);
 
                switch (device->entry.tag) {
                case LB_TAG_CBMEM_ENTRY:
index 37f4d335a606d715fde2324b12e70e1e18179e2b..d814dca33a084cfd526d54fadf2c4353a292362c 100644 (file)
@@ -79,6 +79,7 @@ struct coreboot_device {
                struct lb_cbmem_ref cbmem_ref;
                struct lb_cbmem_entry cbmem_entry;
                struct lb_framebuffer framebuffer;
+               DECLARE_FLEX_ARRAY(u8, raw);
        };
 };
 
index 4e2575dfeb908ff50d37a1c6a5d7be4f6313ba5b..871bedf533a805f65975e1b94ae125d65f431a09 100644 (file)
@@ -361,9 +361,10 @@ static efi_status_t gsmi_get_variable(efi_char16_t *name,
                memcpy(data, gsmi_dev.data_buf->start, *data_size);
 
                /* All variables are have the following attributes */
-               *attr = EFI_VARIABLE_NON_VOLATILE |
-                       EFI_VARIABLE_BOOTSERVICE_ACCESS |
-                       EFI_VARIABLE_RUNTIME_ACCESS;
+               if (attr)
+                       *attr = EFI_VARIABLE_NON_VOLATILE |
+                               EFI_VARIABLE_BOOTSERVICE_ACCESS |
+                               EFI_VARIABLE_RUNTIME_ACCESS;
        }
 
        spin_unlock_irqrestore(&gsmi_dev.lock, flags);
index e7bcfca4159f603317660d8525e6eafd4488ec94..447ee4ea5c903421f762e649318bf6ac6ff28662 100644 (file)
@@ -440,6 +440,9 @@ static const struct file_operations psci_debugfs_ops = {
 
 static int __init psci_debugfs_init(void)
 {
+       if (!invoke_psci_fn || !psci_ops.get_version)
+               return 0;
+
        return PTR_ERR_OR_ZERO(debugfs_create_file("psci", 0444, NULL, NULL,
                                                   &psci_debugfs_ops));
 }
index 8d722e026e9c98b2ef935c4f8eff3296cce769ee..84352a6f4973b5be64afc488e00d1a39d732d5d6 100644 (file)
@@ -91,7 +91,6 @@ enum sprd_eic_type {
 
 struct sprd_eic {
        struct gpio_chip chip;
-       struct irq_chip intc;
        void __iomem *base[SPRD_EIC_MAX_BANK];
        enum sprd_eic_type type;
        spinlock_t lock;
@@ -255,6 +254,8 @@ static void sprd_eic_irq_mask(struct irq_data *data)
        default:
                dev_err(chip->parent, "Unsupported EIC type.\n");
        }
+
+       gpiochip_disable_irq(chip, offset);
 }
 
 static void sprd_eic_irq_unmask(struct irq_data *data)
@@ -263,6 +264,8 @@ static void sprd_eic_irq_unmask(struct irq_data *data)
        struct sprd_eic *sprd_eic = gpiochip_get_data(chip);
        u32 offset = irqd_to_hwirq(data);
 
+       gpiochip_enable_irq(chip, offset);
+
        switch (sprd_eic->type) {
        case SPRD_EIC_DEBOUNCE:
                sprd_eic_update(chip, offset, SPRD_EIC_DBNC_IE, 1);
@@ -564,6 +567,15 @@ static void sprd_eic_irq_handler(struct irq_desc *desc)
        chained_irq_exit(ic, desc);
 }
 
+static const struct irq_chip sprd_eic_irq = {
+       .name           = "sprd-eic",
+       .irq_ack        = sprd_eic_irq_ack,
+       .irq_mask       = sprd_eic_irq_mask,
+       .irq_unmask     = sprd_eic_irq_unmask,
+       .irq_set_type   = sprd_eic_irq_set_type,
+       .flags          = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_IMMUTABLE,
+       GPIOCHIP_IRQ_RESOURCE_HELPERS,
+};
 static int sprd_eic_probe(struct platform_device *pdev)
 {
        const struct sprd_eic_variant_data *pdata;
@@ -626,15 +638,8 @@ static int sprd_eic_probe(struct platform_device *pdev)
                break;
        }
 
-       sprd_eic->intc.name = dev_name(&pdev->dev);
-       sprd_eic->intc.irq_ack = sprd_eic_irq_ack;
-       sprd_eic->intc.irq_mask = sprd_eic_irq_mask;
-       sprd_eic->intc.irq_unmask = sprd_eic_irq_unmask;
-       sprd_eic->intc.irq_set_type = sprd_eic_irq_set_type;
-       sprd_eic->intc.flags = IRQCHIP_SKIP_SET_WAKE;
-
        irq = &sprd_eic->chip.irq;
-       irq->chip = &sprd_eic->intc;
+       gpio_irq_chip_set_chip(irq, &sprd_eic_irq);
        irq->handler = handle_bad_irq;
        irq->default_type = IRQ_TYPE_NONE;
        irq->parent_handler = sprd_eic_irq_handler;
index d5626c572d24ecc095eace76ce733b703cddf2b9..6f673b2f2a1bfb6edf169873b9af035a16b5f8eb 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
+#include <linux/spinlock.h>
 #include <linux/syscore_ops.h>
 #include <linux/gpio/driver.h>
 #include <linux/of.h>
@@ -159,6 +160,7 @@ static int gpio_set_irq_type(struct irq_data *d, u32 type)
 {
        struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
        struct mxc_gpio_port *port = gc->private;
+       unsigned long flags;
        u32 bit, val;
        u32 gpio_idx = d->hwirq;
        int edge;
@@ -197,6 +199,8 @@ static int gpio_set_irq_type(struct irq_data *d, u32 type)
                return -EINVAL;
        }
 
+       raw_spin_lock_irqsave(&port->gc.bgpio_lock, flags);
+
        if (GPIO_EDGE_SEL >= 0) {
                val = readl(port->base + GPIO_EDGE_SEL);
                if (edge == GPIO_INT_BOTH_EDGES)
@@ -217,15 +221,20 @@ static int gpio_set_irq_type(struct irq_data *d, u32 type)
        writel(1 << gpio_idx, port->base + GPIO_ISR);
        port->pad_type[gpio_idx] = type;
 
-       return 0;
+       raw_spin_unlock_irqrestore(&port->gc.bgpio_lock, flags);
+
+       return port->gc.direction_input(&port->gc, gpio_idx);
 }
 
 static void mxc_flip_edge(struct mxc_gpio_port *port, u32 gpio)
 {
        void __iomem *reg = port->base;
+       unsigned long flags;
        u32 bit, val;
        int edge;
 
+       raw_spin_lock_irqsave(&port->gc.bgpio_lock, flags);
+
        reg += GPIO_ICR1 + ((gpio & 0x10) >> 2); /* lower or upper register */
        bit = gpio & 0xf;
        val = readl(reg);
@@ -243,6 +252,8 @@ static void mxc_flip_edge(struct mxc_gpio_port *port, u32 gpio)
                return;
        }
        writel(val | (edge << (bit << 1)), reg);
+
+       raw_spin_unlock_irqrestore(&port->gc.bgpio_lock, flags);
 }
 
 /* handle 32 interrupts in one status register */
index a59d61cd44b2e5eceac9bf97c41945ac77bde0f2..5299e5bb76d6e339cf269d4cb560e42fabbf0922 100644 (file)
@@ -474,6 +474,9 @@ static u8 pcal6534_recalc_addr(struct pca953x_chip *chip, int reg, int off)
        case PCAL6524_DEBOUNCE:
                pinctrl = ((reg & PCAL_PINCTRL_MASK) >> 1) + 0x1c;
                break;
+       default:
+               pinctrl = 0;
+               break;
        }
 
        return pinctrl + addr + (off / BANK_SZ);
index e518490c4b681afa6f979f9e8a5ce5a964aa71e1..c3e4d90f6b183318caf7712f7b3f1531114f6bce 100644 (file)
@@ -47,7 +47,6 @@ enum {
 /**
  * struct sprd_pmic_eic - PMIC EIC controller
  * @chip: the gpio_chip structure.
- * @intc: the irq_chip structure.
  * @map:  the regmap from the parent device.
  * @offset: the EIC controller's offset address of the PMIC.
  * @reg: the array to cache the EIC registers.
@@ -56,7 +55,6 @@ enum {
  */
 struct sprd_pmic_eic {
        struct gpio_chip chip;
-       struct irq_chip intc;
        struct regmap *map;
        u32 offset;
        u8 reg[CACHE_NR_REGS];
@@ -151,15 +149,21 @@ static void sprd_pmic_eic_irq_mask(struct irq_data *data)
 {
        struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
        struct sprd_pmic_eic *pmic_eic = gpiochip_get_data(chip);
+       u32 offset = irqd_to_hwirq(data);
 
        pmic_eic->reg[REG_IE] = 0;
        pmic_eic->reg[REG_TRIG] = 0;
+
+       gpiochip_disable_irq(chip, offset);
 }
 
 static void sprd_pmic_eic_irq_unmask(struct irq_data *data)
 {
        struct gpio_chip *chip = irq_data_get_irq_chip_data(data);
        struct sprd_pmic_eic *pmic_eic = gpiochip_get_data(chip);
+       u32 offset = irqd_to_hwirq(data);
+
+       gpiochip_enable_irq(chip, offset);
 
        pmic_eic->reg[REG_IE] = 1;
        pmic_eic->reg[REG_TRIG] = 1;
@@ -292,6 +296,17 @@ static irqreturn_t sprd_pmic_eic_irq_handler(int irq, void *data)
        return IRQ_HANDLED;
 }
 
+static const struct irq_chip pmic_eic_irq_chip = {
+       .name                   = "sprd-pmic-eic",
+       .irq_mask               = sprd_pmic_eic_irq_mask,
+       .irq_unmask             = sprd_pmic_eic_irq_unmask,
+       .irq_set_type           = sprd_pmic_eic_irq_set_type,
+       .irq_bus_lock           = sprd_pmic_eic_bus_lock,
+       .irq_bus_sync_unlock    = sprd_pmic_eic_bus_sync_unlock,
+       .flags                  = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_IMMUTABLE,
+       GPIOCHIP_IRQ_RESOURCE_HELPERS,
+};
+
 static int sprd_pmic_eic_probe(struct platform_device *pdev)
 {
        struct gpio_irq_chip *irq;
@@ -338,16 +353,8 @@ static int sprd_pmic_eic_probe(struct platform_device *pdev)
        pmic_eic->chip.set = sprd_pmic_eic_set;
        pmic_eic->chip.get = sprd_pmic_eic_get;
 
-       pmic_eic->intc.name = dev_name(&pdev->dev);
-       pmic_eic->intc.irq_mask = sprd_pmic_eic_irq_mask;
-       pmic_eic->intc.irq_unmask = sprd_pmic_eic_irq_unmask;
-       pmic_eic->intc.irq_set_type = sprd_pmic_eic_irq_set_type;
-       pmic_eic->intc.irq_bus_lock = sprd_pmic_eic_bus_lock;
-       pmic_eic->intc.irq_bus_sync_unlock = sprd_pmic_eic_bus_sync_unlock;
-       pmic_eic->intc.flags = IRQCHIP_SKIP_SET_WAKE;
-
        irq = &pmic_eic->chip.irq;
-       irq->chip = &pmic_eic->intc;
+       gpio_irq_chip_set_chip(irq, &pmic_eic_irq_chip);
        irq->threaded = true;
 
        ret = devm_gpiochip_add_data(&pdev->dev, &pmic_eic->chip, pmic_eic);
index 238f3210970cfa0ed3d38d43b141afc0e03135e8..bc5660f61c570d770a2d5cb44ee6bd2768aa6189 100644 (file)
@@ -215,6 +215,7 @@ static int sifive_gpio_probe(struct platform_device *pdev)
                return -ENODEV;
        }
        parent = irq_find_host(irq_parent);
+       of_node_put(irq_parent);
        if (!parent) {
                dev_err(dev, "no IRQ parent domain\n");
                return -ENODEV;
index 9bff63990eee43c0d9f5cc52a50164564cca1f9c..072b4e6532164e5f5e12044fec58cf109513a6de 100644 (file)
@@ -120,6 +120,7 @@ static void sprd_gpio_irq_mask(struct irq_data *data)
        u32 offset = irqd_to_hwirq(data);
 
        sprd_gpio_update(chip, offset, SPRD_GPIO_IE, 0);
+       gpiochip_disable_irq(chip, offset);
 }
 
 static void sprd_gpio_irq_ack(struct irq_data *data)
@@ -136,6 +137,7 @@ static void sprd_gpio_irq_unmask(struct irq_data *data)
        u32 offset = irqd_to_hwirq(data);
 
        sprd_gpio_update(chip, offset, SPRD_GPIO_IE, 1);
+       gpiochip_enable_irq(chip, offset);
 }
 
 static int sprd_gpio_irq_set_type(struct irq_data *data,
@@ -205,13 +207,14 @@ static void sprd_gpio_irq_handler(struct irq_desc *desc)
        chained_irq_exit(ic, desc);
 }
 
-static struct irq_chip sprd_gpio_irqchip = {
+static const struct irq_chip sprd_gpio_irqchip = {
        .name = "sprd-gpio",
        .irq_ack = sprd_gpio_irq_ack,
        .irq_mask = sprd_gpio_irq_mask,
        .irq_unmask = sprd_gpio_irq_unmask,
        .irq_set_type = sprd_gpio_irq_set_type,
-       .flags = IRQCHIP_SKIP_SET_WAKE,
+       .flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_IMMUTABLE,
+       GPIOCHIP_IRQ_RESOURCE_HELPERS,
 };
 
 static int sprd_gpio_probe(struct platform_device *pdev)
@@ -245,7 +248,7 @@ static int sprd_gpio_probe(struct platform_device *pdev)
        sprd_gpio->chip.direction_output = sprd_gpio_direction_output;
 
        irq = &sprd_gpio->chip.irq;
-       irq->chip = &sprd_gpio_irqchip;
+       gpio_irq_chip_set_chip(irq, &sprd_gpio_irqchip);
        irq->handler = handle_bad_irq;
        irq->default_type = IRQ_TYPE_NONE;
        irq->parent_handler = sprd_gpio_irq_handler;
index bed0380c51360a4e5e8cbd15959f685b4d2922fd..9ef0f5641b52182808359eb3b3274557886b6af6 100644 (file)
@@ -385,7 +385,7 @@ err:
 }
 
 static bool acpi_gpio_irq_is_wake(struct device *parent,
-                                 struct acpi_resource_gpio *agpio)
+                                 const struct acpi_resource_gpio *agpio)
 {
        unsigned int pin = agpio->pin_table[0];
 
@@ -778,7 +778,7 @@ static int acpi_populate_gpio_lookup(struct acpi_resource *ares, void *data)
                lookup->info.pin_config = agpio->pin_config;
                lookup->info.debounce = agpio->debounce_timeout;
                lookup->info.gpioint = gpioint;
-               lookup->info.wake_capable = agpio->wake_capable == ACPI_WAKE_CAPABLE;
+               lookup->info.wake_capable = acpi_gpio_irq_is_wake(&lookup->info.adev->dev, agpio);
 
                /*
                 * Polarity and triggering are only specified for GpioInt
@@ -1623,6 +1623,19 @@ static const struct dmi_system_id gpiolib_acpi_quirks[] __initconst = {
                        .ignore_interrupt = "AMDI0030:00@18",
                },
        },
+       {
+               /*
+                * Spurious wakeups from TP_ATTN# pin
+                * Found in BIOS 1.7.8
+                * https://gitlab.freedesktop.org/drm/amd/-/issues/1722#note_1720627
+                */
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_NAME, "NL5xRU"),
+               },
+               .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+                       .ignore_wake = "ELAN0415:00@9",
+               },
+       },
        {} /* Terminating entry */
 };
 
index 5a66d9616d7ccedf5e8a43235203760f29f942ee..939c776b948813c1fb5c755643ac15cbe7a4e6d9 100644 (file)
@@ -3905,8 +3905,8 @@ static struct gpio_desc *gpiod_find_and_request(struct device *consumer,
                                                const char *label,
                                                bool platform_lookup_allowed)
 {
+       unsigned long lookupflags = GPIO_LOOKUP_FLAGS_DEFAULT;
        struct gpio_desc *desc = ERR_PTR(-ENOENT);
-       unsigned long lookupflags;
        int ret;
 
        if (!IS_ERR_OR_NULL(fwnode))
index 6b74df446694b3b5911bdb5c3d771e8d21ff246e..e3e2e6e3b485974ead2b20963d0aa42f3b81d027 100644 (file)
@@ -195,6 +195,7 @@ extern int amdgpu_emu_mode;
 extern uint amdgpu_smu_memory_pool_size;
 extern int amdgpu_smu_pptable_id;
 extern uint amdgpu_dc_feature_mask;
+extern uint amdgpu_freesync_vid_mode;
 extern uint amdgpu_dc_debug_mask;
 extern uint amdgpu_dc_visual_confirm;
 extern uint amdgpu_dm_abm_level;
index b15091d8310d9d11bbbbd6cedcc415e1e77cce9f..3b5c53712d319c2ed43a51fab219d94a6e9d4988 100644 (file)
@@ -2099,7 +2099,7 @@ int amdgpu_amdkfd_map_gtt_bo_to_gart(struct amdgpu_device *adev, struct amdgpu_b
        }
 
        amdgpu_amdkfd_remove_eviction_fence(
-               bo, bo->kfd_bo->process_info->eviction_fence);
+               bo, bo->vm_bo->vm->process_info->eviction_fence);
 
        amdgpu_bo_unreserve(bo);
 
index 8516c814bc9b5e2a497d6c4c0af16b3a57c6f57c..7b5ce00f060260114a0421cf47905e7a009a945c 100644 (file)
@@ -61,6 +61,8 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p,
                amdgpu_ctx_put(p->ctx);
                return -ECANCELED;
        }
+
+       amdgpu_sync_create(&p->sync);
        return 0;
 }
 
@@ -452,18 +454,6 @@ static int amdgpu_syncobj_lookup_and_add(struct amdgpu_cs_parser *p,
        }
 
        r = amdgpu_sync_fence(&p->sync, fence);
-       if (r)
-               goto error;
-
-       /*
-        * When we have an explicit dependency it might be necessary to insert a
-        * pipeline sync to make sure that all caches etc are flushed and the
-        * next job actually sees the results from the previous one.
-        */
-       if (fence->context == p->gang_leader->base.entity->fence_context)
-               r = amdgpu_sync_fence(&p->gang_leader->explicit_sync, fence);
-
-error:
        dma_fence_put(fence);
        return r;
 }
@@ -1188,10 +1178,19 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
 static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
 {
        struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
+       struct drm_gpu_scheduler *sched;
        struct amdgpu_bo_list_entry *e;
+       struct dma_fence *fence;
        unsigned int i;
        int r;
 
+       r = amdgpu_ctx_wait_prev_fence(p->ctx, p->entities[p->gang_leader_idx]);
+       if (r) {
+               if (r != -ERESTARTSYS)
+                       DRM_ERROR("amdgpu_ctx_wait_prev_fence failed.\n");
+               return r;
+       }
+
        list_for_each_entry(e, &p->validated, tv.head) {
                struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
                struct dma_resv *resv = bo->tbo.base.resv;
@@ -1211,10 +1210,24 @@ static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
                        return r;
        }
 
-       r = amdgpu_ctx_wait_prev_fence(p->ctx, p->entities[p->gang_leader_idx]);
-       if (r && r != -ERESTARTSYS)
-               DRM_ERROR("amdgpu_ctx_wait_prev_fence failed.\n");
-       return r;
+       sched = p->gang_leader->base.entity->rq->sched;
+       while ((fence = amdgpu_sync_get_fence(&p->sync))) {
+               struct drm_sched_fence *s_fence = to_drm_sched_fence(fence);
+
+               /*
+                * When we have an dependency it might be necessary to insert a
+                * pipeline sync to make sure that all caches etc are flushed and the
+                * next job actually sees the results from the previous one
+                * before we start executing on the same scheduler ring.
+                */
+               if (!s_fence || s_fence->sched != sched)
+                       continue;
+
+               r = amdgpu_sync_fence(&p->gang_leader->explicit_sync, fence);
+               if (r)
+                       return r;
+       }
+       return 0;
 }
 
 static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p)
@@ -1254,9 +1267,12 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
                        continue;
 
                fence = &p->jobs[i]->base.s_fence->scheduled;
+               dma_fence_get(fence);
                r = drm_sched_job_add_dependency(&leader->base, fence);
-               if (r)
+               if (r) {
+                       dma_fence_put(fence);
                        goto error_cleanup;
+               }
        }
 
        if (p->gang_size > 1) {
@@ -1344,6 +1360,7 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser)
 {
        unsigned i;
 
+       amdgpu_sync_free(&parser->sync);
        for (i = 0; i < parser->num_post_deps; i++) {
                drm_syncobj_put(parser->post_deps[i].syncobj);
                kfree(parser->post_deps[i].chain);
index afe6af9c013852d91a358c78132fa7f0adafae18..2f28a8c02f6412b687c7b5cace7bc670df352410 100644 (file)
@@ -36,6 +36,7 @@
 #include <generated/utsrelease.h>
 #include <linux/pci-p2pdma.h>
 
+#include <drm/drm_aperture.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_fb_helper.h>
 #include <drm/drm_probe_helper.h>
@@ -90,6 +91,8 @@ MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
 #define AMDGPU_MAX_RETRY_LIMIT         2
 #define AMDGPU_RETRY_SRIOV_RESET(r) ((r) == -EBUSY || (r) == -ETIMEDOUT || (r) == -EINVAL)
 
+static const struct drm_driver amdgpu_kms_driver;
+
 const char *amdgpu_asic_name[] = {
        "TAHITI",
        "PITCAIRN",
@@ -3687,6 +3690,11 @@ int amdgpu_device_init(struct amdgpu_device *adev,
        if (r)
                return r;
 
+       /* Get rid of things like offb */
+       r = drm_aperture_remove_conflicting_pci_framebuffers(adev->pdev, &amdgpu_kms_driver);
+       if (r)
+               return r;
+
        /* Enable TMZ based on IP_VERSION */
        amdgpu_gmc_tmz_set(adev);
 
index b4f2d61ea0d53b1b5f24e37f6d0236281f6893b4..cd4caaa295282df402b36a9fd39d081bc7fcae09 100644 (file)
@@ -23,7 +23,6 @@
  */
 
 #include <drm/amdgpu_drm.h>
-#include <drm/drm_aperture.h>
 #include <drm/drm_drv.h>
 #include <drm/drm_fbdev_generic.h>
 #include <drm/drm_gem.h>
@@ -181,6 +180,7 @@ int amdgpu_mes_kiq;
 int amdgpu_noretry = -1;
 int amdgpu_force_asic_type = -1;
 int amdgpu_tmz = -1; /* auto */
+uint amdgpu_freesync_vid_mode;
 int amdgpu_reset_method = -1; /* auto */
 int amdgpu_num_kcq = -1;
 int amdgpu_smartshift_bias;
@@ -879,6 +879,32 @@ module_param_named(backlight, amdgpu_backlight, bint, 0444);
 MODULE_PARM_DESC(tmz, "Enable TMZ feature (-1 = auto (default), 0 = off, 1 = on)");
 module_param_named(tmz, amdgpu_tmz, int, 0444);
 
+/**
+ * DOC: freesync_video (uint)
+ * Enable the optimization to adjust front porch timing to achieve seamless
+ * mode change experience when setting a freesync supported mode for which full
+ * modeset is not needed.
+ *
+ * The Display Core will add a set of modes derived from the base FreeSync
+ * video mode into the corresponding connector's mode list based on commonly
+ * used refresh rates and VRR range of the connected display, when users enable
+ * this feature. From the userspace perspective, they can see a seamless mode
+ * change experience when the change between different refresh rates under the
+ * same resolution. Additionally, userspace applications such as Video playback
+ * can read this modeset list and change the refresh rate based on the video
+ * frame rate. Finally, the userspace can also derive an appropriate mode for a
+ * particular refresh rate based on the FreeSync Mode and add it to the
+ * connector's mode list.
+ *
+ * Note: This is an experimental feature.
+ *
+ * The default value: 0 (off).
+ */
+MODULE_PARM_DESC(
+       freesync_video,
+       "Enable freesync modesetting optimization feature (0 = off (default), 1 = on)");
+module_param_named(freesync_video, amdgpu_freesync_vid_mode, uint, 0444);
+
 /**
  * DOC: reset_method (int)
  * GPU reset method (-1 = auto (default), 0 = legacy, 1 = mode0, 2 = mode1, 3 = mode2, 4 = baco)
@@ -2095,11 +2121,6 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
        }
 #endif
 
-       /* Get rid of things like offb */
-       ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &amdgpu_kms_driver);
-       if (ret)
-               return ret;
-
        adev = devm_drm_dev_alloc(&pdev->dev, &amdgpu_kms_driver, typeof(*adev), ddev);
        if (IS_ERR(adev))
                return PTR_ERR(adev);
index 23692e5d4d13b3d8b10fe5ad8502183a58dfb6ae..3380daf42da8af2a3884d3bf576ccbdbae846055 100644 (file)
@@ -156,6 +156,9 @@ static bool amdgpu_gfx_is_compute_multipipe_capable(struct amdgpu_device *adev)
                return amdgpu_compute_multipipe == 1;
        }
 
+       if (adev->ip_versions[GC_HWIP][0] > IP_VERSION(9, 0, 0))
+               return true;
+
        /* FIXME: spreading the queues across pipes causes perf regressions
         * on POLARIS11 compute workloads */
        if (adev->asic_type == CHIP_POLARIS11)
index fcb711a11a5b6bc4d4cff56289c97df89d1a70d4..3f07b1a2ce47f93e8e176e104e064fa6fbd36635 100644 (file)
@@ -497,6 +497,7 @@ void amdgpu_vmid_free_reserved(struct amdgpu_device *adev,
            !--id_mgr->reserved_use_count) {
                /* give the reserved ID back to normal round robin */
                list_add(&id_mgr->reserved->list, &id_mgr->ids_lru);
+               id_mgr->reserved = NULL;
        }
        vm->reserved_vmid[vmhub] = false;
        mutex_unlock(&id_mgr->lock);
index 9e549923622bd48b6dab1e11b2cbab8432f89b99..c3d9d75143f4ffb8d1a9b3ac6c53b9ce4b84211a 100644 (file)
@@ -161,8 +161,14 @@ void amdgpu_job_free_resources(struct amdgpu_job *job)
        struct dma_fence *f;
        unsigned i;
 
-       /* use sched fence if available */
-       f = job->base.s_fence ? &job->base.s_fence->finished :  &job->hw_fence;
+       /* Check if any fences where initialized */
+       if (job->base.s_fence && job->base.s_fence->finished.ops)
+               f = &job->base.s_fence->finished;
+       else if (job->hw_fence.ops)
+               f = &job->hw_fence;
+       else
+               f = NULL;
+
        for (i = 0; i < job->num_ibs; ++i)
                amdgpu_ib_free(ring->adev, &job->ibs[i], f);
 }
index 4e684c2afc709f622a1bbbf1f6d74b1ff3371b76..25a68d8888e0d597a393e5a05fca1e708230ba5e 100644 (file)
@@ -470,8 +470,9 @@ static bool amdgpu_bo_validate_size(struct amdgpu_device *adev,
        return true;
 
 fail:
-       DRM_DEBUG("BO size %lu > total memory in domain: %llu\n", size,
-                 man->size);
+       if (man)
+               DRM_DEBUG("BO size %lu > total memory in domain: %llu\n", size,
+                         man->size);
        return false;
 }
 
index bac7976975bd3b3672f87ceb9e48bc0486d96fd7..dcd8c066bc1f5024814853a9f9689b3c8eef4132 100644 (file)
@@ -391,8 +391,10 @@ int amdgpu_sync_push_to_job(struct amdgpu_sync *sync, struct amdgpu_job *job)
 
                dma_fence_get(f);
                r = drm_sched_job_add_dependency(&job->base, f);
-               if (r)
+               if (r) {
+                       dma_fence_put(f);
                        return r;
+               }
        }
        return 0;
 }
index faa12146635cf1dc8c864bcd4a4ea56826bfe96a..9fa1d814508a6cd42e31f3f0fa8b0150835af548 100644 (file)
@@ -882,7 +882,7 @@ void amdgpu_vram_mgr_fini(struct amdgpu_device *adev)
                kfree(rsv);
 
        list_for_each_entry_safe(rsv, temp, &mgr->reserved_pages, blocks) {
-               drm_buddy_free_list(&mgr->mm, &rsv->blocks);
+               drm_buddy_free_list(&mgr->mm, &rsv->allocated);
                kfree(rsv);
        }
        drm_buddy_fini(&mgr->mm);
index a56c6e106d00df054da9c7e061fd07f902359a83..b9b57a66e113f9c66027ac92bc616645a2bed494 100644 (file)
@@ -1287,10 +1287,8 @@ static int gfx_v11_0_sw_init(void *handle)
 
        switch (adev->ip_versions[GC_HWIP][0]) {
        case IP_VERSION(11, 0, 0):
-       case IP_VERSION(11, 0, 1):
        case IP_VERSION(11, 0, 2):
        case IP_VERSION(11, 0, 3):
-       case IP_VERSION(11, 0, 4):
                adev->gfx.me.num_me = 1;
                adev->gfx.me.num_pipe_per_me = 1;
                adev->gfx.me.num_queue_per_pipe = 1;
@@ -1298,6 +1296,15 @@ static int gfx_v11_0_sw_init(void *handle)
                adev->gfx.mec.num_pipe_per_mec = 4;
                adev->gfx.mec.num_queue_per_pipe = 4;
                break;
+       case IP_VERSION(11, 0, 1):
+       case IP_VERSION(11, 0, 4):
+               adev->gfx.me.num_me = 1;
+               adev->gfx.me.num_pipe_per_me = 1;
+               adev->gfx.me.num_queue_per_pipe = 1;
+               adev->gfx.mec.num_mec = 1;
+               adev->gfx.mec.num_pipe_per_mec = 4;
+               adev->gfx.mec.num_queue_per_pipe = 4;
+               break;
        default:
                adev->gfx.me.num_me = 1;
                adev->gfx.me.num_pipe_per_me = 1;
index ecb4c3abc629739e18a7940761e146129dc654cf..c06ada0844ba154bfe61782c46abef90b4e8af7d 100644 (file)
@@ -200,7 +200,7 @@ static int add_queue_mes(struct device_queue_manager *dqm, struct queue *q,
        queue_input.wptr_addr = (uint64_t)q->properties.write_ptr;
 
        if (q->wptr_bo) {
-               wptr_addr_off = (uint64_t)q->properties.write_ptr - (uint64_t)q->wptr_bo->kfd_bo->va;
+               wptr_addr_off = (uint64_t)q->properties.write_ptr & (PAGE_SIZE - 1);
                queue_input.wptr_mc_addr = ((uint64_t)q->wptr_bo->tbo.resource->start << PAGE_SHIFT) + wptr_addr_off;
        }
 
index 814f99888ab10b276d135319125891db0f2721f8..b94d2c1422ad824d42af5d7bffa760cc9ca453fa 100644 (file)
@@ -570,6 +570,15 @@ svm_range_vram_node_new(struct amdgpu_device *adev, struct svm_range *prange,
                goto reserve_bo_failed;
        }
 
+       if (clear) {
+               r = amdgpu_bo_sync_wait(bo, AMDGPU_FENCE_OWNER_KFD, false);
+               if (r) {
+                       pr_debug("failed %d to sync bo\n", r);
+                       amdgpu_bo_unreserve(bo);
+                       goto reserve_bo_failed;
+               }
+       }
+
        r = dma_resv_reserve_fences(bo->tbo.base.resv, 1);
        if (r) {
                pr_debug("failed %d to reserve bo\n", r);
index bceb1a5b25186d71b366f2cc7d672c9b904a97ae..3fdaba56be6fbf8a7916b5a7ed1d15e2337b9258 100644 (file)
@@ -801,7 +801,7 @@ static int kfd_build_sysfs_node_entry(struct kfd_topology_device *dev,
 
                p2plink->attr.name = "properties";
                p2plink->attr.mode = KFD_SYSFS_FILE_MODE;
-               sysfs_attr_init(&iolink->attr);
+               sysfs_attr_init(&p2plink->attr);
                ret = sysfs_create_file(p2plink->kobj, &p2plink->attr);
                if (ret < 0)
                        return ret;
index 50c783e19f5ab03fa23444a12e8eb57fed00fb72..4d42033a703fa2533d20cfab1b2538e3280f0c9c 100644 (file)
@@ -1503,8 +1503,6 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
                case IP_VERSION(3, 0, 1):
                case IP_VERSION(3, 1, 2):
                case IP_VERSION(3, 1, 3):
-               case IP_VERSION(3, 1, 4):
-               case IP_VERSION(3, 1, 5):
                case IP_VERSION(3, 1, 6):
                        init_data.flags.gpu_vm_support = true;
                        break;
@@ -1730,10 +1728,6 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
                adev->dm.vblank_control_workqueue = NULL;
        }
 
-       for (i = 0; i < adev->dm.display_indexes_num; i++) {
-               drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
-       }
-
        amdgpu_dm_destroy_drm_device(&adev->dm);
 
 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
@@ -4361,6 +4355,10 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
                amdgpu_set_panel_orientation(&aconnector->base);
        }
 
+       /* If we didn't find a panel, notify the acpi video detection */
+       if (dm->adev->flags & AMD_IS_APU && dm->num_of_edps == 0)
+               acpi_video_report_nolcd();
+
        /* Software is initialized. Now we can register interrupt handlers. */
        switch (adev->asic_type) {
 #if defined(CONFIG_DRM_AMD_DC_SI)
@@ -5307,8 +5305,6 @@ static void fill_stream_properties_from_drm_display_mode(
 
        timing_out->aspect_ratio = get_aspect_ratio(mode_in);
 
-       stream->output_color_space = get_output_color_space(timing_out);
-
        stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
        stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
        if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
@@ -5319,6 +5315,8 @@ static void fill_stream_properties_from_drm_display_mode(
                        adjust_colour_depth_from_display_info(timing_out, info);
                }
        }
+
+       stream->output_color_space = get_output_color_space(timing_out);
 }
 
 static void fill_audio_info(struct audio_info *audio_info,
@@ -5831,7 +5829,8 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
                 */
                DRM_DEBUG_DRIVER("No preferred mode found\n");
        } else {
-               recalculate_timing = is_freesync_video_mode(&mode, aconnector);
+               recalculate_timing = amdgpu_freesync_vid_mode &&
+                                is_freesync_video_mode(&mode, aconnector);
                if (recalculate_timing) {
                        freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
                        drm_mode_copy(&saved_mode, &mode);
@@ -6982,7 +6981,7 @@ static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connect
        struct amdgpu_dm_connector *amdgpu_dm_connector =
                to_amdgpu_dm_connector(connector);
 
-       if (!edid)
+       if (!(amdgpu_freesync_vid_mode && edid))
                return;
 
        if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
@@ -8846,7 +8845,8 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
                 * TODO: Refactor this function to allow this check to work
                 * in all conditions.
                 */
-               if (dm_new_crtc_state->stream &&
+               if (amdgpu_freesync_vid_mode &&
+                   dm_new_crtc_state->stream &&
                    is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
                        goto skip_modeset;
 
@@ -8881,7 +8881,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
                if (!dm_old_crtc_state->stream)
                        goto skip_modeset;
 
-               if (dm_new_crtc_state->stream &&
+               if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
                    is_timing_unchanged_for_freesync(new_crtc_state,
                                                     old_crtc_state)) {
                        new_crtc_state->mode_changed = false;
@@ -8893,7 +8893,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
                        set_freesync_fixed_config(dm_new_crtc_state);
 
                        goto skip_modeset;
-               } else if (aconnector &&
+               } else if (amdgpu_freesync_vid_mode && aconnector &&
                           is_freesync_video_mode(&new_crtc_state->mode,
                                                  aconnector)) {
                        struct drm_display_mode *high_mode;
@@ -9524,8 +9524,8 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
                        goto fail;
                }
 
-               if (dm_old_con_state->abm_level !=
-                   dm_new_con_state->abm_level)
+               if (dm_old_con_state->abm_level != dm_new_con_state->abm_level ||
+                   dm_old_con_state->scaling != dm_new_con_state->scaling)
                        new_crtc_state->connectors_changed = true;
        }
 
index 1edf7385f8d89a6478c94a98a1e4e43075cc6461..d7a044e7973052a959032725bb56e390cf0f499a 100644 (file)
@@ -468,7 +468,6 @@ static const struct drm_connector_helper_funcs dm_dp_mst_connector_helper_funcs
 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
 {
        drm_encoder_cleanup(encoder);
-       kfree(encoder);
 }
 
 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
index 471078fc3900d7377149ed3312841889b749b95b..652270a0b498ce45a9965d2761a8e18791c6dc51 100644 (file)
@@ -90,8 +90,8 @@ static const struct out_csc_color_matrix_type output_csc_matrix[] = {
                { 0xE00, 0xF349, 0xFEB7, 0x1000, 0x6CE, 0x16E3,
                                0x24F, 0x200, 0xFCCB, 0xF535, 0xE00, 0x1000} },
        { COLOR_SPACE_YCBCR2020_TYPE,
-               { 0x1000, 0xF149, 0xFEB7, 0x0000, 0x0868, 0x15B2,
-                               0x01E6, 0x0000, 0xFB88, 0xF478, 0x1000, 0x0000} },
+               { 0x1000, 0xF149, 0xFEB7, 0x1004, 0x0868, 0x15B2,
+                               0x01E6, 0x201, 0xFB88, 0xF478, 0x1000, 0x1004} },
        { COLOR_SPACE_YCBCR709_BLACK_TYPE,
                { 0x0000, 0x0000, 0x0000, 0x1000, 0x0000, 0x0000,
                                0x0000, 0x0200, 0x0000, 0x0000, 0x0000, 0x1000} },
index 5af601cff1a0f7f2de8601c57ba287a6b0c1c8a9..b53feeaf5cf117031d531410830292f4dc9306f9 100644 (file)
@@ -6257,12 +6257,12 @@ bool dml32_CalculateDETSwathFillLatencyHiding(unsigned int NumberOfActiveSurface
        double SwathSizePerSurfaceC[DC__NUM_DPP__MAX];
        bool NotEnoughDETSwathFillLatencyHiding = false;
 
-       /* calculate sum of single swath size for all pipes in bytes*/
+       /* calculate sum of single swath size for all pipes in bytes */
        for (k = 0; k < NumberOfActiveSurfaces; k++) {
-               SwathSizePerSurfaceY[k] += SwathHeightY[k] * SwathWidthY[k] * BytePerPixelInDETY[k] * NumOfDPP[k];
+               SwathSizePerSurfaceY[k] = SwathHeightY[k] * SwathWidthY[k] * BytePerPixelInDETY[k] * NumOfDPP[k];
 
                if (SwathHeightC[k] != 0)
-                       SwathSizePerSurfaceC[k] += SwathHeightC[k] * SwathWidthC[k] * BytePerPixelInDETC[k] * NumOfDPP[k];
+                       SwathSizePerSurfaceC[k] = SwathHeightC[k] * SwathWidthC[k] * BytePerPixelInDETC[k] * NumOfDPP[k];
                else
                        SwathSizePerSurfaceC[k] = 0;
 
index 85e22210963fc76de583d2139d3f76b0465eaf36..5cdc07165480b5a3be51ec46c722699d34d52b84 100644 (file)
@@ -1171,6 +1171,7 @@ static int renoir_get_smu_metrics_data(struct smu_context *smu,
        int ret = 0;
        uint32_t apu_percent = 0;
        uint32_t dgpu_percent = 0;
+       struct amdgpu_device *adev = smu->adev;
 
 
        ret = smu_cmn_get_metrics_table(smu,
@@ -1196,7 +1197,11 @@ static int renoir_get_smu_metrics_data(struct smu_context *smu,
                *value = metrics->AverageUvdActivity / 100;
                break;
        case METRICS_AVERAGE_SOCKETPOWER:
-               *value = (metrics->CurrentSocketPower << 8) / 1000;
+               if (((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(12, 0, 1)) && (adev->pm.fw_version >= 0x40000f)) ||
+               ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(12, 0, 0)) && (adev->pm.fw_version >= 0x373200)))
+                       *value = metrics->CurrentSocketPower << 8;
+               else
+                       *value = (metrics->CurrentSocketPower << 8) / 1000;
                break;
        case METRICS_TEMPERATURE_EDGE:
                *value = (metrics->GfxTemperature / 100) *
index e54b760b875bfeb12280490863e9a7ab9d4b5945..b4373b6568ae6c5780b3d33e3beac5e3f2154073 100644 (file)
@@ -1261,7 +1261,8 @@ int smu_v13_0_set_fan_speed_rpm(struct smu_context *smu,
                                uint32_t speed)
 {
        struct amdgpu_device *adev = smu->adev;
-       uint32_t tach_period, crystal_clock_freq;
+       uint32_t crystal_clock_freq = 2500;
+       uint32_t tach_period;
        int ret;
 
        if (!speed)
@@ -1271,7 +1272,6 @@ int smu_v13_0_set_fan_speed_rpm(struct smu_context *smu,
        if (ret)
                return ret;
 
-       crystal_clock_freq = amdgpu_asic_get_xclk(adev);
        tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed);
        WREG32_SOC15(THM, 0, regCG_TACH_CTRL,
                     REG_SET_FIELD(RREG32_SOC15(THM, 0, regCG_TACH_CTRL),
@@ -2298,6 +2298,10 @@ bool smu_v13_0_baco_is_support(struct smu_context *smu)
            !smu_baco->platform_support)
                return false;
 
+       /* return true if ASIC is in BACO state already */
+       if (smu_v13_0_baco_get_state(smu) == SMU_BACO_STATE_ENTER)
+               return true;
+
        if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_BACO_BIT) &&
            !smu_cmn_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT))
                return false;
index 9643b21c636a9ec17d493fd579a724ca1e07eb8e..4c20d17e7416ee42e553e19f956b6145b18fb696 100644 (file)
@@ -213,6 +213,7 @@ static struct cmn2asic_mapping smu_v13_0_0_feature_mask_map[SMU_FEATURE_COUNT] =
        FEA_MAP(SOC_PCC),
        [SMU_FEATURE_DPM_VCLK_BIT] = {1, FEATURE_MM_DPM_BIT},
        [SMU_FEATURE_DPM_DCLK_BIT] = {1, FEATURE_MM_DPM_BIT},
+       [SMU_FEATURE_PPT_BIT] = {1, FEATURE_THROTTLERS_BIT},
 };
 
 static struct cmn2asic_mapping smu_v13_0_0_table_map[SMU_TABLE_COUNT] = {
index 5c6c6ad011ca63c8d8ccd285b4cf68b1eafc03a8..e87db7e02e8a5914f57a55cb12f84b336b3a7d6b 100644 (file)
@@ -192,6 +192,7 @@ static struct cmn2asic_mapping smu_v13_0_7_feature_mask_map[SMU_FEATURE_COUNT] =
        FEA_MAP(SOC_PCC),
        [SMU_FEATURE_DPM_VCLK_BIT] = {1, FEATURE_MM_DPM_BIT},
        [SMU_FEATURE_DPM_DCLK_BIT] = {1, FEATURE_MM_DPM_BIT},
+       [SMU_FEATURE_PPT_BIT] = {1, FEATURE_THROTTLERS_BIT},
 };
 
 static struct cmn2asic_mapping smu_v13_0_7_table_map[SMU_TABLE_COUNT] = {
index 11bb5939947180ef922049cd16470ccdd7bacef5..3d1f50f481cfda6d85845c47a95d2218028f6c51 100644 (file)
@@ -38,6 +38,25 @@ static void drm_block_free(struct drm_buddy *mm,
        kmem_cache_free(slab_blocks, block);
 }
 
+static void list_insert_sorted(struct drm_buddy *mm,
+                              struct drm_buddy_block *block)
+{
+       struct drm_buddy_block *node;
+       struct list_head *head;
+
+       head = &mm->free_list[drm_buddy_block_order(block)];
+       if (list_empty(head)) {
+               list_add(&block->link, head);
+               return;
+       }
+
+       list_for_each_entry(node, head, link)
+               if (drm_buddy_block_offset(block) < drm_buddy_block_offset(node))
+                       break;
+
+       __list_add(&block->link, node->link.prev, &node->link);
+}
+
 static void mark_allocated(struct drm_buddy_block *block)
 {
        block->header &= ~DRM_BUDDY_HEADER_STATE;
@@ -52,8 +71,7 @@ static void mark_free(struct drm_buddy *mm,
        block->header &= ~DRM_BUDDY_HEADER_STATE;
        block->header |= DRM_BUDDY_FREE;
 
-       list_add(&block->link,
-                &mm->free_list[drm_buddy_block_order(block)]);
+       list_insert_sorted(mm, block);
 }
 
 static void mark_split(struct drm_buddy_block *block)
@@ -387,20 +405,26 @@ err_undo:
 }
 
 static struct drm_buddy_block *
-get_maxblock(struct list_head *head)
+get_maxblock(struct drm_buddy *mm, unsigned int order)
 {
        struct drm_buddy_block *max_block = NULL, *node;
+       unsigned int i;
 
-       max_block = list_first_entry_or_null(head,
-                                            struct drm_buddy_block,
-                                            link);
-       if (!max_block)
-               return NULL;
+       for (i = order; i <= mm->max_order; ++i) {
+               if (!list_empty(&mm->free_list[i])) {
+                       node = list_last_entry(&mm->free_list[i],
+                                              struct drm_buddy_block,
+                                              link);
+                       if (!max_block) {
+                               max_block = node;
+                               continue;
+                       }
 
-       list_for_each_entry(node, head, link) {
-               if (drm_buddy_block_offset(node) >
-                   drm_buddy_block_offset(max_block))
-                       max_block = node;
+                       if (drm_buddy_block_offset(node) >
+                           drm_buddy_block_offset(max_block)) {
+                               max_block = node;
+                       }
+               }
        }
 
        return max_block;
@@ -412,20 +436,23 @@ alloc_from_freelist(struct drm_buddy *mm,
                    unsigned long flags)
 {
        struct drm_buddy_block *block = NULL;
-       unsigned int i;
+       unsigned int tmp;
        int err;
 
-       for (i = order; i <= mm->max_order; ++i) {
-               if (flags & DRM_BUDDY_TOPDOWN_ALLOCATION) {
-                       block = get_maxblock(&mm->free_list[i]);
-                       if (block)
-                               break;
-               } else {
-                       block = list_first_entry_or_null(&mm->free_list[i],
-                                                        struct drm_buddy_block,
-                                                        link);
-                       if (block)
-                               break;
+       if (flags & DRM_BUDDY_TOPDOWN_ALLOCATION) {
+               block = get_maxblock(mm, order);
+               if (block)
+                       /* Store the obtained block order */
+                       tmp = drm_buddy_block_order(block);
+       } else {
+               for (tmp = order; tmp <= mm->max_order; ++tmp) {
+                       if (!list_empty(&mm->free_list[tmp])) {
+                               block = list_last_entry(&mm->free_list[tmp],
+                                                       struct drm_buddy_block,
+                                                       link);
+                               if (block)
+                                       break;
+                       }
                }
        }
 
@@ -434,18 +461,18 @@ alloc_from_freelist(struct drm_buddy *mm,
 
        BUG_ON(!drm_buddy_block_is_free(block));
 
-       while (i != order) {
+       while (tmp != order) {
                err = split_block(mm, block);
                if (unlikely(err))
                        goto err_undo;
 
                block = block->right;
-               i--;
+               tmp--;
        }
        return block;
 
 err_undo:
-       if (i != order)
+       if (tmp != order)
                __drm_buddy_free(mm, block);
        return ERR_PTR(err);
 }
index b3a731b9170a6aa7919c5d1b24ef75f928561fd1..0d0c26ebab90696a297349af32314807ca5b6f9f 100644 (file)
@@ -30,7 +30,9 @@
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
 #include <linux/console.h>
+#include <linux/pci.h>
 #include <linux/sysrq.h>
+#include <linux/vga_switcheroo.h>
 
 #include <drm/drm_atomic.h>
 #include <drm/drm_drv.h>
@@ -1909,6 +1911,11 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
                return ret;
 
        strcpy(fb_helper->fb->comm, "[fbcon]");
+
+       /* Set the fb info for vgaswitcheroo clients. Does nothing otherwise. */
+       if (dev_is_pci(dev->dev))
+               vga_switcheroo_client_fb_set(to_pci_dev(dev->dev), fb_helper->info);
+
        return 0;
 }
 
index 52d8800a8ab86c76f88b2b5e6ab2c11d7033fd87..3659f0465a72416d69c62fef75d8ab95579f8cbc 100644 (file)
@@ -304,6 +304,12 @@ static const struct dmi_system_id orientation_data[] = {
                  DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad D330-10IGM"),
                },
                .driver_data = (void *)&lcd1200x1920_rightside_up,
+       }, {    /* Lenovo Ideapad D330-10IGL (HD) */
+               .matches = {
+                 DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+                 DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad D330-10IGL"),
+               },
+               .driver_data = (void *)&lcd800x1280_rightside_up,
        }, {    /* Lenovo Yoga Book X90F / X91F / X91L */
                .matches = {
                  /* Non exact match to match all versions */
index fce69fa446d586eb2b1c715f4714f024d4c39bce..2cbc1292ab3821f557e09ed349091c3685a87804 100644 (file)
 
 #include "i915_drv.h"
 #include "i915_reg.h"
+#include "intel_de.h"
 #include "intel_display_types.h"
 #include "intel_dsi.h"
 #include "intel_dsi_vbt.h"
+#include "intel_gmbus_regs.h"
 #include "vlv_dsi.h"
 #include "vlv_dsi_regs.h"
 #include "vlv_sideband.h"
@@ -377,6 +379,85 @@ static void icl_exec_gpio(struct intel_connector *connector,
        drm_dbg_kms(&dev_priv->drm, "Skipping ICL GPIO element execution\n");
 }
 
+enum {
+       MIPI_RESET_1 = 0,
+       MIPI_AVDD_EN_1,
+       MIPI_BKLT_EN_1,
+       MIPI_AVEE_EN_1,
+       MIPI_VIO_EN_1,
+       MIPI_RESET_2,
+       MIPI_AVDD_EN_2,
+       MIPI_BKLT_EN_2,
+       MIPI_AVEE_EN_2,
+       MIPI_VIO_EN_2,
+};
+
+static void icl_native_gpio_set_value(struct drm_i915_private *dev_priv,
+                                     int gpio, bool value)
+{
+       int index;
+
+       if (drm_WARN_ON(&dev_priv->drm, DISPLAY_VER(dev_priv) == 11 && gpio >= MIPI_RESET_2))
+               return;
+
+       switch (gpio) {
+       case MIPI_RESET_1:
+       case MIPI_RESET_2:
+               index = gpio == MIPI_RESET_1 ? HPD_PORT_A : HPD_PORT_B;
+
+               /*
+                * Disable HPD to set the pin to output, and set output
+                * value. The HPD pin should not be enabled for DSI anyway,
+                * assuming the board design and VBT are sane, and the pin isn't
+                * used by a non-DSI encoder.
+                *
+                * The locking protects against concurrent SHOTPLUG_CTL_DDI
+                * modifications in irq setup and handling.
+                */
+               spin_lock_irq(&dev_priv->irq_lock);
+               intel_de_rmw(dev_priv, SHOTPLUG_CTL_DDI,
+                            SHOTPLUG_CTL_DDI_HPD_ENABLE(index) |
+                            SHOTPLUG_CTL_DDI_HPD_OUTPUT_DATA(index),
+                            value ? SHOTPLUG_CTL_DDI_HPD_OUTPUT_DATA(index) : 0);
+               spin_unlock_irq(&dev_priv->irq_lock);
+               break;
+       case MIPI_AVDD_EN_1:
+       case MIPI_AVDD_EN_2:
+               index = gpio == MIPI_AVDD_EN_1 ? 0 : 1;
+
+               intel_de_rmw(dev_priv, PP_CONTROL(index), PANEL_POWER_ON,
+                            value ? PANEL_POWER_ON : 0);
+               break;
+       case MIPI_BKLT_EN_1:
+       case MIPI_BKLT_EN_2:
+               index = gpio == MIPI_BKLT_EN_1 ? 0 : 1;
+
+               intel_de_rmw(dev_priv, PP_CONTROL(index), EDP_BLC_ENABLE,
+                            value ? EDP_BLC_ENABLE : 0);
+               break;
+       case MIPI_AVEE_EN_1:
+       case MIPI_AVEE_EN_2:
+               index = gpio == MIPI_AVEE_EN_1 ? 1 : 2;
+
+               intel_de_rmw(dev_priv, GPIO(dev_priv, index),
+                            GPIO_CLOCK_VAL_OUT,
+                            GPIO_CLOCK_DIR_MASK | GPIO_CLOCK_DIR_OUT |
+                            GPIO_CLOCK_VAL_MASK | (value ? GPIO_CLOCK_VAL_OUT : 0));
+               break;
+       case MIPI_VIO_EN_1:
+       case MIPI_VIO_EN_2:
+               index = gpio == MIPI_VIO_EN_1 ? 1 : 2;
+
+               intel_de_rmw(dev_priv, GPIO(dev_priv, index),
+                            GPIO_DATA_VAL_OUT,
+                            GPIO_DATA_DIR_MASK | GPIO_DATA_DIR_OUT |
+                            GPIO_DATA_VAL_MASK | (value ? GPIO_DATA_VAL_OUT : 0));
+               break;
+       default:
+               MISSING_CASE(gpio);
+       }
+}
+
 static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
 {
        struct drm_device *dev = intel_dsi->base.base.dev;
@@ -384,8 +465,7 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
        struct intel_connector *connector = intel_dsi->attached_connector;
        u8 gpio_source, gpio_index = 0, gpio_number;
        bool value;
-
-       drm_dbg_kms(&dev_priv->drm, "\n");
+       bool native = DISPLAY_VER(dev_priv) >= 11;
 
        if (connector->panel.vbt.dsi.seq_version >= 3)
                gpio_index = *data++;
@@ -398,10 +478,18 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
        else
                gpio_source = 0;
 
+       if (connector->panel.vbt.dsi.seq_version >= 4 && *data & BIT(1))
+               native = false;
+
        /* pull up/down */
        value = *data++ & 1;
 
-       if (DISPLAY_VER(dev_priv) >= 11)
+       drm_dbg_kms(&dev_priv->drm, "GPIO index %u, number %u, source %u, native %s, set to %s\n",
+                   gpio_index, gpio_number, gpio_source, str_yes_no(native), str_on_off(value));
+
+       if (native)
+               icl_native_gpio_set_value(dev_priv, gpio_number, value);
+       else if (DISPLAY_VER(dev_priv) >= 11)
                icl_exec_gpio(connector, gpio_source, gpio_index, value);
        else if (IS_VALLEYVIEW(dev_priv))
                vlv_exec_gpio(connector, gpio_source, gpio_number, value);
index 76490cc59d8f11df931e5c0a57691dcfe600357e..7d07fa3123ece747c046176f585a7091869fb15e 100644 (file)
@@ -1627,7 +1627,7 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
        u32 offset;
        int ret;
 
-       if (w > max_width || w < min_width || h > max_height) {
+       if (w > max_width || w < min_width || h > max_height || h < 1) {
                drm_dbg_kms(&dev_priv->drm,
                            "requested Y/RGB source size %dx%d outside limits (min: %dx1 max: %dx%d)\n",
                            w, h, min_width, max_width, max_height);
index 7f2831efc798b9e777e84e5d7185463bfd98087c..6250de9b9196c5c5e17df3307d6def0428342112 100644 (file)
@@ -1688,6 +1688,10 @@ void i915_gem_init__contexts(struct drm_i915_private *i915)
        init_contexts(&i915->gem.contexts);
 }
 
+/*
+ * Note that this implicitly consumes the ctx reference, by placing
+ * the ctx in the context_xa.
+ */
 static void gem_context_register(struct i915_gem_context *ctx,
                                 struct drm_i915_file_private *fpriv,
                                 u32 id)
@@ -1703,10 +1707,6 @@ static void gem_context_register(struct i915_gem_context *ctx,
        snprintf(ctx->name, sizeof(ctx->name), "%s[%d]",
                 current->comm, pid_nr(ctx->pid));
 
-       /* And finally expose ourselves to userspace via the idr */
-       old = xa_store(&fpriv->context_xa, id, ctx, GFP_KERNEL);
-       WARN_ON(old);
-
        spin_lock(&ctx->client->ctx_lock);
        list_add_tail_rcu(&ctx->client_link, &ctx->client->ctx_list);
        spin_unlock(&ctx->client->ctx_lock);
@@ -1714,6 +1714,10 @@ static void gem_context_register(struct i915_gem_context *ctx,
        spin_lock(&i915->gem.contexts.lock);
        list_add_tail(&ctx->link, &i915->gem.contexts.list);
        spin_unlock(&i915->gem.contexts.lock);
+
+       /* And finally expose ourselves to userspace via the idr */
+       old = xa_store(&fpriv->context_xa, id, ctx, GFP_KERNEL);
+       WARN_ON(old);
 }
 
 int i915_gem_context_open(struct drm_i915_private *i915,
@@ -2199,14 +2203,22 @@ finalize_create_context_locked(struct drm_i915_file_private *file_priv,
        if (IS_ERR(ctx))
                return ctx;
 
+       /*
+        * One for the xarray and one for the caller.  We need to grab
+        * the reference *prior* to making the ctx visible to userspace
+        * in gem_context_register(), as at any point after that
+        * userspace can try to race us with another thread destroying
+        * the context under our feet.
+        */
+       i915_gem_context_get(ctx);
+
        gem_context_register(ctx, file_priv, id);
 
        old = xa_erase(&file_priv->proto_context_xa, id);
        GEM_BUG_ON(old != pc);
        proto_context_close(file_priv->dev_priv, pc);
 
-       /* One for the xarray and one for the caller */
-       return i915_gem_context_get(ctx);
+       return ctx;
 }
 
 struct i915_gem_context *
index da09767fda0706aad7c41db51c088c36964e7ff4..f266b68cf012cfda711670905213c533dbe2de6b 100644 (file)
@@ -730,32 +730,69 @@ static int eb_reserve(struct i915_execbuffer *eb)
        bool unpinned;
 
        /*
-        * Attempt to pin all of the buffers into the GTT.
-        * This is done in 2 phases:
+        * We have one or more buffers that we couldn't bind, which could be due to
+        * various reasons. To resolve this we have 4 passes, with every next
+        * level turning the screws tighter:
         *
-        * 1. Unbind all objects that do not match the GTT constraints for
-        *    the execbuffer (fenceable, mappable, alignment etc).
-        * 2. Bind new objects.
+        * 0. Unbind all objects that do not match the GTT constraints for the
+        * execbuffer (fenceable, mappable, alignment etc). Bind all new
+        * objects.  This avoids unnecessary unbinding of later objects in order
+        * to make room for the earlier objects *unless* we need to defragment.
         *
-        * This avoid unnecessary unbinding of later objects in order to make
-        * room for the earlier objects *unless* we need to defragment.
+        * 1. Reorder the buffers, where objects with the most restrictive
+        * placement requirements go first (ignoring fixed location buffers for
+        * now).  For example, objects needing the mappable aperture (the first
+        * 256M of GTT), should go first vs objects that can be placed just
+        * about anywhere. Repeat the previous pass.
         *
-        * Defragmenting is skipped if all objects are pinned at a fixed location.
+        * 2. Consider buffers that are pinned at a fixed location. Also try to
+        * evict the entire VM this time, leaving only objects that we were
+        * unable to lock. Try again to bind the buffers. (still using the new
+        * buffer order).
+        *
+        * 3. We likely have object lock contention for one or more stubborn
+        * objects in the VM, for which we need to evict to make forward
+        * progress (perhaps we are fighting the shrinker?). When evicting the
+        * VM this time around, anything that we can't lock we now track using
+        * the busy_bo, using the full lock (after dropping the vm->mutex to
+        * prevent deadlocks), instead of trylock. We then continue to evict the
+        * VM, this time with the stubborn object locked, which we can now
+        * hopefully unbind (if still bound in the VM). Repeat until the VM is
+        * evicted. Finally we should be able to bind everything.
         */
-       for (pass = 0; pass <= 2; pass++) {
+       for (pass = 0; pass <= 3; pass++) {
                int pin_flags = PIN_USER | PIN_VALIDATE;
 
                if (pass == 0)
                        pin_flags |= PIN_NONBLOCK;
 
                if (pass >= 1)
-                       unpinned = eb_unbind(eb, pass == 2);
+                       unpinned = eb_unbind(eb, pass >= 2);
 
                if (pass == 2) {
                        err = mutex_lock_interruptible(&eb->context->vm->mutex);
                        if (!err) {
-                               err = i915_gem_evict_vm(eb->context->vm, &eb->ww);
+                               err = i915_gem_evict_vm(eb->context->vm, &eb->ww, NULL);
+                               mutex_unlock(&eb->context->vm->mutex);
+                       }
+                       if (err)
+                               return err;
+               }
+
+               if (pass == 3) {
+retry:
+                       err = mutex_lock_interruptible(&eb->context->vm->mutex);
+                       if (!err) {
+                               struct drm_i915_gem_object *busy_bo = NULL;
+
+                               err = i915_gem_evict_vm(eb->context->vm, &eb->ww, &busy_bo);
                                mutex_unlock(&eb->context->vm->mutex);
+                               if (err && busy_bo) {
+                                       err = i915_gem_object_lock(busy_bo, &eb->ww);
+                                       i915_gem_object_put(busy_bo);
+                                       if (!err)
+                                               goto retry;
+                               }
                        }
                        if (err)
                                return err;
index c29efdef8313a5acffef16332d6cf261f4ad395e..0ad44f3868ded96b202afa569807a76c18c0549a 100644 (file)
@@ -369,7 +369,7 @@ retry:
                if (vma == ERR_PTR(-ENOSPC)) {
                        ret = mutex_lock_interruptible(&ggtt->vm.mutex);
                        if (!ret) {
-                               ret = i915_gem_evict_vm(&ggtt->vm, &ww);
+                               ret = i915_gem_evict_vm(&ggtt->vm, &ww, NULL);
                                mutex_unlock(&ggtt->vm.mutex);
                        }
                        if (ret)
index beaf27e09e8a9d6fdebe9046d242a303d43d6deb..977dead10ab57c857ae3ffa319fdfc69da0a2a76 100644 (file)
@@ -1847,7 +1847,7 @@ static int igt_shrink_thp(void *arg)
                        I915_SHRINK_ACTIVE);
        i915_vma_unpin(vma);
        if (err)
-               goto out_put;
+               goto out_wf;
 
        /*
         * Now that the pages are *unpinned* shrinking should invoke
@@ -1863,19 +1863,19 @@ static int igt_shrink_thp(void *arg)
                pr_err("unexpected pages mismatch, should_swap=%s\n",
                       str_yes_no(should_swap));
                err = -EINVAL;
-               goto out_put;
+               goto out_wf;
        }
 
        if (should_swap == (obj->mm.page_sizes.sg || obj->mm.page_sizes.phys)) {
                pr_err("unexpected residual page-size bits, should_swap=%s\n",
                       str_yes_no(should_swap));
                err = -EINVAL;
-               goto out_put;
+               goto out_wf;
        }
 
        err = i915_vma_pin(vma, 0, 0, flags);
        if (err)
-               goto out_put;
+               goto out_wf;
 
        while (n--) {
                err = cpu_check(obj, n, 0xdeadbeaf);
index 767e329e1cc5fd4eed40fd8cc6b969d5c4978cce..9c18b5f2e7892bd15cb54dc4d378d2f4f63911f4 100644 (file)
@@ -1109,9 +1109,15 @@ static void mmio_invalidate_full(struct intel_gt *gt)
                        continue;
 
                if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) {
+                       u32 val = BIT(engine->instance);
+
+                       if (engine->class == VIDEO_DECODE_CLASS ||
+                           engine->class == VIDEO_ENHANCEMENT_CLASS ||
+                           engine->class == COMPUTE_CLASS)
+                               val = _MASKED_BIT_ENABLE(val);
                        intel_gt_mcr_multicast_write_fw(gt,
                                                        xehp_regs[engine->class],
-                                                       BIT(engine->instance));
+                                                       val);
                } else {
                        rb = get_reg_and_bit(engine, regs == gen8_regs, regs, num);
                        if (!i915_mmio_reg_offset(rb.reg))
index c3cd926917957f629be21c77956e433dd49da586..a5454af2a9cfdc380c8f23c6469c404a96ad2481 100644 (file)
 #define GEN9_WM_CHICKEN3                       _MMIO(0x5588)
 #define   GEN9_FACTOR_IN_CLR_VAL_HIZ           (1 << 9)
 
-#define CHICKEN_RASTER_1                       _MMIO(0x6204)
+#define CHICKEN_RASTER_1                       MCR_REG(0x6204)
 #define   DIS_SF_ROUND_NEAREST_EVEN            REG_BIT(8)
 
-#define CHICKEN_RASTER_2                       _MMIO(0x6208)
+#define CHICKEN_RASTER_2                       MCR_REG(0x6208)
 #define   TBIMR_FAST_CLIP                      REG_BIT(5)
 
 #define VFLSKPD                                        MCR_REG(0x62a8)
 #define   RC_OP_FLUSH_ENABLE                   (1 << 0)
 #define   HIZ_RAW_STALL_OPT_DISABLE            (1 << 2)
 #define CACHE_MODE_1                           _MMIO(0x7004) /* IVB+ */
-#define   PIXEL_SUBSPAN_COLLECT_OPT_DISABLE    (1 << 6)
-#define   GEN8_4x4_STC_OPTIMIZATION_DISABLE    (1 << 6)
-#define   GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE   (1 << 1)
+#define   MSAA_OPTIMIZATION_REDUC_DISABLE      REG_BIT(11)
+#define   PIXEL_SUBSPAN_COLLECT_OPT_DISABLE    REG_BIT(6)
+#define   GEN8_4x4_STC_OPTIMIZATION_DISABLE    REG_BIT(6)
+#define   GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE   REG_BIT(1)
 
 #define GEN7_GT_MODE                           _MMIO(0x7008)
 #define   GEN9_IZ_HASHING_MASK(slice)          (0x3 << ((slice) * 2))
 #define GEN8_L3CNTLREG                         _MMIO(0x7034)
 #define   GEN8_ERRDETBCTRL                     (1 << 9)
 
+#define PSS_MODE2                              _MMIO(0x703c)
+#define   SCOREBOARD_STALL_FLUSH_CONTROL       REG_BIT(5)
+
 #define GEN7_SC_INSTDONE                       _MMIO(0x7100)
 #define GEN12_SC_INSTDONE_EXTRA                        _MMIO(0x7104)
 #define GEN12_SC_INSTDONE_EXTRA2               _MMIO(0x7108)
index 24736ebee17c286758c81311846ed35c5dc9d869..78dc5e493c6222261d9cc9c0442a63313901cc51 100644 (file)
@@ -278,6 +278,7 @@ out:
 static int gen6_hw_domain_reset(struct intel_gt *gt, u32 hw_domain_mask)
 {
        struct intel_uncore *uncore = gt->uncore;
+       int loops = 2;
        int err;
 
        /*
@@ -285,18 +286,39 @@ static int gen6_hw_domain_reset(struct intel_gt *gt, u32 hw_domain_mask)
         * for fifo space for the write or forcewake the chip for
         * the read
         */
-       intel_uncore_write_fw(uncore, GEN6_GDRST, hw_domain_mask);
+       do {
+               intel_uncore_write_fw(uncore, GEN6_GDRST, hw_domain_mask);
 
-       /* Wait for the device to ack the reset requests */
-       err = __intel_wait_for_register_fw(uncore,
-                                          GEN6_GDRST, hw_domain_mask, 0,
-                                          500, 0,
-                                          NULL);
+               /*
+                * Wait for the device to ack the reset requests.
+                *
+                * On some platforms, e.g. Jasperlake, we see that the
+                * engine register state is not cleared until shortly after
+                * GDRST reports completion, causing a failure as we try
+                * to immediately resume while the internal state is still
+                * in flux. If we immediately repeat the reset, the second
+                * reset appears to serialise with the first, and since
+                * it is a no-op, the registers should retain their reset
+                * value. However, there is still a concern that upon
+                * leaving the second reset, the internal engine state
+                * is still in flux and not ready for resuming.
+                */
+               err = __intel_wait_for_register_fw(uncore, GEN6_GDRST,
+                                                  hw_domain_mask, 0,
+                                                  2000, 0,
+                                                  NULL);
+       } while (err == 0 && --loops);
        if (err)
                GT_TRACE(gt,
                         "Wait for 0x%08x engines reset failed\n",
                         hw_domain_mask);
 
+       /*
+        * As we have observed that the engine state is still volatile
+        * after GDRST is acked, impose a small delay to let everything settle.
+        */
+       udelay(50);
+
        return err;
 }
 
index 2afb4f80a954df209d6b0e3efa7e09f2bb716e10..949c19339015b074af9e75662edda4268dbc4912 100644 (file)
@@ -645,7 +645,7 @@ static void icl_ctx_workarounds_init(struct intel_engine_cs *engine,
 static void dg2_ctx_gt_tuning_init(struct intel_engine_cs *engine,
                                   struct i915_wa_list *wal)
 {
-       wa_masked_en(wal, CHICKEN_RASTER_2, TBIMR_FAST_CLIP);
+       wa_mcr_masked_en(wal, CHICKEN_RASTER_2, TBIMR_FAST_CLIP);
        wa_mcr_write_clr_set(wal, XEHP_L3SQCREG5, L3_PWM_TIMER_INIT_VAL_MASK,
                             REG_FIELD_PREP(L3_PWM_TIMER_INIT_VAL_MASK, 0x7f));
        wa_mcr_add(wal,
@@ -771,11 +771,19 @@ static void dg2_ctx_workarounds_init(struct intel_engine_cs *engine,
 
        /* Wa_14014947963:dg2 */
        if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_B0, STEP_FOREVER) ||
-               IS_DG2_G11(engine->i915) || IS_DG2_G12(engine->i915))
+           IS_DG2_G11(engine->i915) || IS_DG2_G12(engine->i915))
                wa_masked_field_set(wal, VF_PREEMPTION, PREEMPTION_VERTEX_COUNT, 0x4000);
 
+       /* Wa_18018764978:dg2 */
+       if (IS_DG2_GRAPHICS_STEP(engine->i915, G10, STEP_C0, STEP_FOREVER) ||
+           IS_DG2_G11(engine->i915) || IS_DG2_G12(engine->i915))
+               wa_masked_en(wal, PSS_MODE2, SCOREBOARD_STALL_FLUSH_CONTROL);
+
        /* Wa_15010599737:dg2 */
-       wa_masked_en(wal, CHICKEN_RASTER_1, DIS_SF_ROUND_NEAREST_EVEN);
+       wa_mcr_masked_en(wal, CHICKEN_RASTER_1, DIS_SF_ROUND_NEAREST_EVEN);
+
+       /* Wa_18019271663:dg2 */
+       wa_masked_en(wal, CACHE_MODE_1, MSAA_OPTIMIZATION_REDUC_DISABLE);
 }
 
 static void fakewa_disable_nestedbb_mode(struct intel_engine_cs *engine,
index 0c80ba51a4bdce745b119b0e104b41b9adfa0bd2..2bcdd192f8147176877294659727d58b9e6754f4 100644 (file)
@@ -545,6 +545,32 @@ static int check_ccs_header(struct intel_gt *gt,
        return 0;
 }
 
+static int try_firmware_load(struct intel_uc_fw *uc_fw, const struct firmware **fw)
+{
+       struct intel_gt *gt = __uc_fw_to_gt(uc_fw);
+       struct device *dev = gt->i915->drm.dev;
+       int err;
+
+       err = firmware_request_nowarn(fw, uc_fw->file_selected.path, dev);
+
+       if (err)
+               return err;
+
+       if ((*fw)->size > INTEL_UC_RSVD_GGTT_PER_FW) {
+               drm_err(&gt->i915->drm,
+                       "%s firmware %s: size (%zuKB) exceeds max supported size (%uKB)\n",
+                       intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
+                       (*fw)->size / SZ_1K, INTEL_UC_RSVD_GGTT_PER_FW / SZ_1K);
+
+               /* try to find another blob to load */
+               release_firmware(*fw);
+               *fw = NULL;
+               return -ENOENT;
+       }
+
+       return 0;
+}
+
 /**
  * intel_uc_fw_fetch - fetch uC firmware
  * @uc_fw: uC firmware
@@ -558,7 +584,6 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
        struct intel_gt *gt = __uc_fw_to_gt(uc_fw);
        struct drm_i915_private *i915 = gt->i915;
        struct intel_uc_fw_file file_ideal;
-       struct device *dev = i915->drm.dev;
        struct drm_i915_gem_object *obj;
        const struct firmware *fw = NULL;
        bool old_ver = false;
@@ -574,20 +599,9 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
        __force_fw_fetch_failures(uc_fw, -EINVAL);
        __force_fw_fetch_failures(uc_fw, -ESTALE);
 
-       err = firmware_request_nowarn(&fw, uc_fw->file_selected.path, dev);
+       err = try_firmware_load(uc_fw, &fw);
        memcpy(&file_ideal, &uc_fw->file_wanted, sizeof(file_ideal));
 
-       if (!err && fw->size > INTEL_UC_RSVD_GGTT_PER_FW) {
-               drm_err(&i915->drm,
-                       "%s firmware %s: size (%zuKB) exceeds max supported size (%uKB)\n",
-                       intel_uc_fw_type_repr(uc_fw->type), uc_fw->file_selected.path,
-                       fw->size / SZ_1K, INTEL_UC_RSVD_GGTT_PER_FW / SZ_1K);
-
-               /* try to find another blob to load */
-               release_firmware(fw);
-               err = -ENOENT;
-       }
-
        /* Any error is terminal if overriding. Don't bother searching for older versions */
        if (err && intel_uc_fw_is_overridden(uc_fw))
                goto fail;
@@ -608,7 +622,7 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
                        break;
                }
 
-               err = firmware_request_nowarn(&fw, uc_fw->file_selected.path, dev);
+               err = try_firmware_load(uc_fw, &fw);
        }
 
        if (err)
index 9f1c209d9251107f95cad7ea4c28e62cf505439f..0616b73175f3e929e36682745c72e73c5b561da0 100644 (file)
@@ -151,6 +151,22 @@ DEFINE_SIMPLE_ATTRIBUTE(vgpu_scan_nonprivbb_fops,
                        vgpu_scan_nonprivbb_get, vgpu_scan_nonprivbb_set,
                        "0x%llx\n");
 
+static int vgpu_status_get(void *data, u64 *val)
+{
+       struct intel_vgpu *vgpu = (struct intel_vgpu *)data;
+
+       *val = 0;
+
+       if (test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
+               *val |= (1 << INTEL_VGPU_STATUS_ATTACHED);
+       if (test_bit(INTEL_VGPU_STATUS_ACTIVE, vgpu->status))
+               *val |= (1 << INTEL_VGPU_STATUS_ACTIVE);
+
+       return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(vgpu_status_fops, vgpu_status_get, NULL, "0x%llx\n");
+
 /**
  * intel_gvt_debugfs_add_vgpu - register debugfs entries for a vGPU
  * @vgpu: a vGPU
@@ -162,11 +178,12 @@ void intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu)
        snprintf(name, 16, "vgpu%d", vgpu->id);
        vgpu->debugfs = debugfs_create_dir(name, vgpu->gvt->debugfs_root);
 
-       debugfs_create_bool("active", 0444, vgpu->debugfs, &vgpu->active);
        debugfs_create_file("mmio_diff", 0444, vgpu->debugfs, vgpu,
                            &vgpu_mmio_diff_fops);
        debugfs_create_file("scan_nonprivbb", 0644, vgpu->debugfs, vgpu,
                            &vgpu_scan_nonprivbb_fops);
+       debugfs_create_file("status", 0644, vgpu->debugfs, vgpu,
+                           &vgpu_status_fops);
 }
 
 /**
@@ -175,8 +192,13 @@ void intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu)
  */
 void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu)
 {
-       debugfs_remove_recursive(vgpu->debugfs);
-       vgpu->debugfs = NULL;
+       struct intel_gvt *gvt = vgpu->gvt;
+       struct drm_minor *minor = gvt->gt->i915->drm.primary;
+
+       if (minor->debugfs_root && gvt->debugfs_root) {
+               debugfs_remove_recursive(vgpu->debugfs);
+               vgpu->debugfs = NULL;
+       }
 }
 
 /**
@@ -199,6 +221,10 @@ void intel_gvt_debugfs_init(struct intel_gvt *gvt)
  */
 void intel_gvt_debugfs_clean(struct intel_gvt *gvt)
 {
-       debugfs_remove_recursive(gvt->debugfs_root);
-       gvt->debugfs_root = NULL;
+       struct drm_minor *minor = gvt->gt->i915->drm.primary;
+
+       if (minor->debugfs_root) {
+               debugfs_remove_recursive(gvt->debugfs_root);
+               gvt->debugfs_root = NULL;
+       }
 }
index 355f1c0e86641730b09a2851b8172325087b360d..ffe41e9be04fc091f7a984e614f0df58f408666b 100644 (file)
@@ -134,7 +134,8 @@ static void dmabuf_gem_object_free(struct kref *kref)
        struct list_head *pos;
        struct intel_vgpu_dmabuf_obj *dmabuf_obj;
 
-       if (vgpu && vgpu->active && !list_empty(&vgpu->dmabuf_obj_list_head)) {
+       if (vgpu && test_bit(INTEL_VGPU_STATUS_ACTIVE, vgpu->status) &&
+           !list_empty(&vgpu->dmabuf_obj_list_head)) {
                list_for_each(pos, &vgpu->dmabuf_obj_list_head) {
                        dmabuf_obj = list_entry(pos, struct intel_vgpu_dmabuf_obj, list);
                        if (dmabuf_obj == obj) {
index 51e5e8fb505bccac4627139f10ff4565f93f11f4..4ec85308379a4feb76b44ed57155449c4e4623b6 100644 (file)
@@ -55,7 +55,7 @@ static bool intel_gvt_is_valid_gfn(struct intel_vgpu *vgpu, unsigned long gfn)
        int idx;
        bool ret;
 
-       if (!vgpu->attached)
+       if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
                return false;
 
        idx = srcu_read_lock(&kvm->srcu);
@@ -1178,7 +1178,7 @@ static int is_2MB_gtt_possible(struct intel_vgpu *vgpu,
        if (!HAS_PAGE_SIZES(vgpu->gvt->gt->i915, I915_GTT_PAGE_SIZE_2M))
                return 0;
 
-       if (!vgpu->attached)
+       if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
                return -EINVAL;
        pfn = gfn_to_pfn(vgpu->vfio_device.kvm, ops->get_pfn(entry));
        if (is_error_noslot_pfn(pfn))
@@ -1209,10 +1209,8 @@ static int split_2MB_gtt_entry(struct intel_vgpu *vgpu,
        for_each_shadow_entry(sub_spt, &sub_se, sub_index) {
                ret = intel_gvt_dma_map_guest_page(vgpu, start_gfn + sub_index,
                                                   PAGE_SIZE, &dma_addr);
-               if (ret) {
-                       ppgtt_invalidate_spt(spt);
-                       return ret;
-               }
+               if (ret)
+                       goto err;
                sub_se.val64 = se->val64;
 
                /* Copy the PAT field from PDE. */
@@ -1231,6 +1229,17 @@ static int split_2MB_gtt_entry(struct intel_vgpu *vgpu,
        ops->set_pfn(se, sub_spt->shadow_page.mfn);
        ppgtt_set_shadow_entry(spt, se, index);
        return 0;
+err:
+       /* Cancel the existing address mappings of DMA addr. */
+       for_each_present_shadow_entry(sub_spt, &sub_se, sub_index) {
+               gvt_vdbg_mm("invalidate 4K entry\n");
+               ppgtt_invalidate_pte(sub_spt, &sub_se);
+       }
+       /* Release the new allocated spt. */
+       trace_spt_change(sub_spt->vgpu->id, "release", sub_spt,
+               sub_spt->guest_page.gfn, sub_spt->shadow_page.type);
+       ppgtt_free_spt(sub_spt);
+       return ret;
 }
 
 static int split_64KB_gtt_entry(struct intel_vgpu *vgpu,
index 62823c0e13ab83a8a2a31b2b76d6dbf6300f1c90..2d65800d8e93b9b288fda886c35448394f99506a 100644 (file)
@@ -172,13 +172,18 @@ struct intel_vgpu_submission {
 
 #define KVMGT_DEBUGFS_FILENAME         "kvmgt_nr_cache_entries"
 
+enum {
+       INTEL_VGPU_STATUS_ATTACHED = 0,
+       INTEL_VGPU_STATUS_ACTIVE,
+       INTEL_VGPU_STATUS_NR_BITS,
+};
+
 struct intel_vgpu {
        struct vfio_device vfio_device;
        struct intel_gvt *gvt;
        struct mutex vgpu_lock;
        int id;
-       bool active;
-       bool attached;
+       DECLARE_BITMAP(status, INTEL_VGPU_STATUS_NR_BITS);
        bool pv_notified;
        bool failsafe;
        unsigned int resetting_eng;
@@ -467,7 +472,7 @@ void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
 
 #define for_each_active_vgpu(gvt, vgpu, id) \
        idr_for_each_entry((&(gvt)->vgpu_idr), (vgpu), (id)) \
-               for_each_if(vgpu->active)
+               for_each_if(test_bit(INTEL_VGPU_STATUS_ACTIVE, vgpu->status))
 
 static inline void intel_vgpu_write_pci_bar(struct intel_vgpu *vgpu,
                                            u32 offset, u32 val, bool low)
@@ -725,7 +730,7 @@ static inline bool intel_gvt_mmio_is_cmd_write_patch(
 static inline int intel_gvt_read_gpa(struct intel_vgpu *vgpu, unsigned long gpa,
                void *buf, unsigned long len)
 {
-       if (!vgpu->attached)
+       if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
                return -ESRCH;
        return vfio_dma_rw(&vgpu->vfio_device, gpa, buf, len, false);
 }
@@ -743,7 +748,7 @@ static inline int intel_gvt_read_gpa(struct intel_vgpu *vgpu, unsigned long gpa,
 static inline int intel_gvt_write_gpa(struct intel_vgpu *vgpu,
                unsigned long gpa, void *buf, unsigned long len)
 {
-       if (!vgpu->attached)
+       if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
                return -ESRCH;
        return vfio_dma_rw(&vgpu->vfio_device, gpa, buf, len, true);
 }
index a6b2021b665ffdd40574e4352ebad70f267e756d..68eca023bbc68b4cb06bd3ded782d078c842a5e1 100644 (file)
@@ -433,7 +433,7 @@ static int inject_virtual_interrupt(struct intel_vgpu *vgpu)
         * enabled by guest. so if msi_trigger is null, success is still
         * returned and don't inject interrupt into guest.
         */
-       if (!vgpu->attached)
+       if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
                return -ESRCH;
        if (vgpu->msi_trigger && eventfd_signal(vgpu->msi_trigger, 1) != 1)
                return -EFAULT;
index f5451adcd4890c08cb121f1e1d00d58ce64d010f..8ae7039b3683257d73a38cc1ee72299aea6b7c4d 100644 (file)
@@ -638,7 +638,7 @@ static bool __kvmgt_vgpu_exist(struct intel_vgpu *vgpu)
 
        mutex_lock(&vgpu->gvt->lock);
        for_each_active_vgpu(vgpu->gvt, itr, id) {
-               if (!itr->attached)
+               if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, itr->status))
                        continue;
 
                if (vgpu->vfio_device.kvm == itr->vfio_device.kvm) {
@@ -655,9 +655,6 @@ static int intel_vgpu_open_device(struct vfio_device *vfio_dev)
 {
        struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev);
 
-       if (vgpu->attached)
-               return -EEXIST;
-
        if (!vgpu->vfio_device.kvm ||
            vgpu->vfio_device.kvm->mm != current->mm) {
                gvt_vgpu_err("KVM is required to use Intel vGPU\n");
@@ -667,14 +664,14 @@ static int intel_vgpu_open_device(struct vfio_device *vfio_dev)
        if (__kvmgt_vgpu_exist(vgpu))
                return -EEXIST;
 
-       vgpu->attached = true;
-
        vgpu->track_node.track_write = kvmgt_page_track_write;
        vgpu->track_node.track_flush_slot = kvmgt_page_track_flush_slot;
        kvm_get_kvm(vgpu->vfio_device.kvm);
        kvm_page_track_register_notifier(vgpu->vfio_device.kvm,
                                         &vgpu->track_node);
 
+       set_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status);
+
        debugfs_create_ulong(KVMGT_DEBUGFS_FILENAME, 0444, vgpu->debugfs,
                             &vgpu->nr_cache_entries);
 
@@ -698,11 +695,10 @@ static void intel_vgpu_close_device(struct vfio_device *vfio_dev)
 {
        struct intel_vgpu *vgpu = vfio_dev_to_vgpu(vfio_dev);
 
-       if (!vgpu->attached)
-               return;
-
        intel_gvt_release_vgpu(vgpu);
 
+       clear_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status);
+
        debugfs_remove(debugfs_lookup(KVMGT_DEBUGFS_FILENAME, vgpu->debugfs));
 
        kvm_page_track_unregister_notifier(vgpu->vfio_device.kvm,
@@ -718,8 +714,6 @@ static void intel_vgpu_close_device(struct vfio_device *vfio_dev)
        vgpu->dma_addr_cache = RB_ROOT;
 
        intel_vgpu_release_msi_eventfd_ctx(vgpu);
-
-       vgpu->attached = false;
 }
 
 static u64 intel_vgpu_get_bar_addr(struct intel_vgpu *vgpu, int bar)
@@ -1512,9 +1506,6 @@ static void intel_vgpu_remove(struct mdev_device *mdev)
 {
        struct intel_vgpu *vgpu = dev_get_drvdata(&mdev->dev);
 
-       if (WARN_ON_ONCE(vgpu->attached))
-               return;
-
        vfio_unregister_group_dev(&vgpu->vfio_device);
        vfio_put_device(&vgpu->vfio_device);
 }
@@ -1559,7 +1550,7 @@ int intel_gvt_page_track_add(struct intel_vgpu *info, u64 gfn)
        struct kvm_memory_slot *slot;
        int idx;
 
-       if (!info->attached)
+       if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, info->status))
                return -ESRCH;
 
        idx = srcu_read_lock(&kvm->srcu);
@@ -1589,8 +1580,8 @@ int intel_gvt_page_track_remove(struct intel_vgpu *info, u64 gfn)
        struct kvm_memory_slot *slot;
        int idx;
 
-       if (!info->attached)
-               return 0;
+       if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, info->status))
+               return -ESRCH;
 
        idx = srcu_read_lock(&kvm->srcu);
        slot = gfn_to_memslot(kvm, gfn);
@@ -1668,7 +1659,7 @@ int intel_gvt_dma_map_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
        struct gvt_dma *entry;
        int ret;
 
-       if (!vgpu->attached)
+       if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
                return -EINVAL;
 
        mutex_lock(&vgpu->cache_lock);
@@ -1714,8 +1705,8 @@ int intel_gvt_dma_pin_guest_page(struct intel_vgpu *vgpu, dma_addr_t dma_addr)
        struct gvt_dma *entry;
        int ret = 0;
 
-       if (!vgpu->attached)
-               return -ENODEV;
+       if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
+               return -EINVAL;
 
        mutex_lock(&vgpu->cache_lock);
        entry = __gvt_cache_find_dma_addr(vgpu, dma_addr);
@@ -1742,7 +1733,7 @@ void intel_gvt_dma_unmap_guest_page(struct intel_vgpu *vgpu,
 {
        struct gvt_dma *entry;
 
-       if (!vgpu->attached)
+       if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
                return;
 
        mutex_lock(&vgpu->cache_lock);
@@ -1778,7 +1769,7 @@ static void intel_gvt_test_and_emulate_vblank(struct intel_gvt *gvt)
        idr_for_each_entry((&(gvt)->vgpu_idr), (vgpu), (id)) {
                if (test_and_clear_bit(INTEL_GVT_REQUEST_EMULATE_VBLANK + id,
                                       (void *)&gvt->service_request)) {
-                       if (vgpu->active)
+                       if (test_bit(INTEL_VGPU_STATUS_ACTIVE, vgpu->status))
                                intel_vgpu_emulate_vblank(vgpu);
                }
        }
index 9cd8fcbf7cad16085d8405c9fc00b077db5cfbcf..f4055804aad1feb470422a5f808454359390e771 100644 (file)
@@ -695,6 +695,7 @@ intel_vgpu_shadow_mm_pin(struct intel_vgpu_workload *workload)
 
        if (workload->shadow_mm->type != INTEL_GVT_MM_PPGTT ||
            !workload->shadow_mm->ppgtt_mm.shadowed) {
+               intel_vgpu_unpin_mm(workload->shadow_mm);
                gvt_vgpu_err("workload shadow ppgtt isn't ready\n");
                return -EINVAL;
        }
@@ -865,7 +866,8 @@ pick_next_workload(struct intel_gvt *gvt, struct intel_engine_cs *engine)
                goto out;
        }
 
-       if (!scheduler->current_vgpu->active ||
+       if (!test_bit(INTEL_VGPU_STATUS_ACTIVE,
+                     scheduler->current_vgpu->status) ||
            list_empty(workload_q_head(scheduler->current_vgpu, engine)))
                goto out;
 
index 3c529c2705ddcaf4c796d776c42e135ecebe34f4..a5497440484f129c1525c8e4ceb137eabcf3bb9c 100644 (file)
@@ -166,9 +166,7 @@ void intel_gvt_clean_vgpu_types(struct intel_gvt *gvt)
  */
 void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu)
 {
-       mutex_lock(&vgpu->vgpu_lock);
-       vgpu->active = true;
-       mutex_unlock(&vgpu->vgpu_lock);
+       set_bit(INTEL_VGPU_STATUS_ACTIVE, vgpu->status);
 }
 
 /**
@@ -183,7 +181,7 @@ void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu)
 {
        mutex_lock(&vgpu->vgpu_lock);
 
-       vgpu->active = false;
+       clear_bit(INTEL_VGPU_STATUS_ACTIVE, vgpu->status);
 
        if (atomic_read(&vgpu->submission.running_workload_num)) {
                mutex_unlock(&vgpu->vgpu_lock);
@@ -228,7 +226,8 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
        struct intel_gvt *gvt = vgpu->gvt;
        struct drm_i915_private *i915 = gvt->gt->i915;
 
-       drm_WARN(&i915->drm, vgpu->active, "vGPU is still active!\n");
+       drm_WARN(&i915->drm, test_bit(INTEL_VGPU_STATUS_ACTIVE, vgpu->status),
+                "vGPU is still active!\n");
 
        /*
         * remove idr first so later clean can judge if need to stop
@@ -285,8 +284,7 @@ struct intel_vgpu *intel_gvt_create_idle_vgpu(struct intel_gvt *gvt)
        if (ret)
                goto out_free_vgpu;
 
-       vgpu->active = false;
-
+       clear_bit(INTEL_VGPU_STATUS_ACTIVE, vgpu->status);
        return vgpu;
 
 out_free_vgpu:
index 69103ae37779217bc4235383aad1c757c63cd57c..61c38fc734cfb65c2d2790dd0b86e5f8875ce2e3 100644 (file)
@@ -1069,12 +1069,9 @@ static int i915_driver_open(struct drm_device *dev, struct drm_file *file)
  */
 static void i915_driver_lastclose(struct drm_device *dev)
 {
-       struct drm_i915_private *i915 = to_i915(dev);
-
        intel_fbdev_restore_mode(dev);
 
-       if (HAS_DISPLAY(i915))
-               vga_switcheroo_process_delayed_switch();
+       vga_switcheroo_process_delayed_switch();
 }
 
 static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
index f025ee4fa52618cb280864b322b84f5cd8ae2c1f..a4b4d9b7d26c7abc302e88e7b770a9f9fcb9b190 100644 (file)
@@ -416,6 +416,11 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
  * @vm: Address space to cleanse
  * @ww: An optional struct i915_gem_ww_ctx. If not NULL, i915_gem_evict_vm
  * will be able to evict vma's locked by the ww as well.
+ * @busy_bo: Optional pointer to struct drm_i915_gem_object. If not NULL, then
+ * in the event i915_gem_evict_vm() is unable to trylock an object for eviction,
+ * then @busy_bo will point to it. -EBUSY is also returned. The caller must drop
+ * the vm->mutex, before trying again to acquire the contended lock. The caller
+ * also owns a reference to the object.
  *
  * This function evicts all vmas from a vm.
  *
@@ -425,7 +430,8 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
  * To clarify: This is for freeing up virtual address space, not for freeing
  * memory in e.g. the shrinker.
  */
-int i915_gem_evict_vm(struct i915_address_space *vm, struct i915_gem_ww_ctx *ww)
+int i915_gem_evict_vm(struct i915_address_space *vm, struct i915_gem_ww_ctx *ww,
+                     struct drm_i915_gem_object **busy_bo)
 {
        int ret = 0;
 
@@ -457,15 +463,22 @@ int i915_gem_evict_vm(struct i915_address_space *vm, struct i915_gem_ww_ctx *ww)
                         * the resv is shared among multiple objects, we still
                         * need the object ref.
                         */
-                       if (dying_vma(vma) ||
+                       if (!i915_gem_object_get_rcu(vma->obj) ||
                            (ww && (dma_resv_locking_ctx(vma->obj->base.resv) == &ww->ctx))) {
                                __i915_vma_pin(vma);
                                list_add(&vma->evict_link, &locked_eviction_list);
                                continue;
                        }
 
-                       if (!i915_gem_object_trylock(vma->obj, ww))
+                       if (!i915_gem_object_trylock(vma->obj, ww)) {
+                               if (busy_bo) {
+                                       *busy_bo = vma->obj; /* holds ref */
+                                       ret = -EBUSY;
+                                       break;
+                               }
+                               i915_gem_object_put(vma->obj);
                                continue;
+                       }
 
                        __i915_vma_pin(vma);
                        list_add(&vma->evict_link, &eviction_list);
@@ -473,25 +486,29 @@ int i915_gem_evict_vm(struct i915_address_space *vm, struct i915_gem_ww_ctx *ww)
                if (list_empty(&eviction_list) && list_empty(&locked_eviction_list))
                        break;
 
-               ret = 0;
                /* Unbind locked objects first, before unlocking the eviction_list */
                list_for_each_entry_safe(vma, vn, &locked_eviction_list, evict_link) {
                        __i915_vma_unpin(vma);
 
-                       if (ret == 0)
+                       if (ret == 0) {
                                ret = __i915_vma_unbind(vma);
-                       if (ret != -EINTR) /* "Get me out of here!" */
-                               ret = 0;
+                               if (ret != -EINTR) /* "Get me out of here!" */
+                                       ret = 0;
+                       }
+                       if (!dying_vma(vma))
+                               i915_gem_object_put(vma->obj);
                }
 
                list_for_each_entry_safe(vma, vn, &eviction_list, evict_link) {
                        __i915_vma_unpin(vma);
-                       if (ret == 0)
+                       if (ret == 0) {
                                ret = __i915_vma_unbind(vma);
-                       if (ret != -EINTR) /* "Get me out of here!" */
-                               ret = 0;
+                               if (ret != -EINTR) /* "Get me out of here!" */
+                                       ret = 0;
+                       }
 
                        i915_gem_object_unlock(vma->obj);
+                       i915_gem_object_put(vma->obj);
                }
        } while (ret == 0);
 
index e593c530f9bd7ac3be83a89733ff9e24ad471a47..bf0ee0e4fe60886eceac0786bc902d6398dae773 100644 (file)
@@ -11,6 +11,7 @@
 struct drm_mm_node;
 struct i915_address_space;
 struct i915_gem_ww_ctx;
+struct drm_i915_gem_object;
 
 int __must_check i915_gem_evict_something(struct i915_address_space *vm,
                                          struct i915_gem_ww_ctx *ww,
@@ -23,6 +24,7 @@ int __must_check i915_gem_evict_for_node(struct i915_address_space *vm,
                                         struct drm_mm_node *node,
                                         unsigned int flags);
 int i915_gem_evict_vm(struct i915_address_space *vm,
-                     struct i915_gem_ww_ctx *ww);
+                     struct i915_gem_ww_ctx *ww,
+                     struct drm_i915_gem_object **busy_bo);
 
 #endif /* __I915_GEM_EVICT_H__ */
index edfe363af8389f1e5f1ec396d8b39c4612be23e7..91c5339860412105d67cc5560103ab73f19c37bc 100644 (file)
@@ -1974,7 +1974,10 @@ static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
        if (ddi_hotplug_trigger) {
                u32 dig_hotplug_reg;
 
+               /* Locking due to DSI native GPIO sequences */
+               spin_lock(&dev_priv->irq_lock);
                dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, SHOTPLUG_CTL_DDI, 0, 0);
+               spin_unlock(&dev_priv->irq_lock);
 
                intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask,
                                   ddi_hotplug_trigger, dig_hotplug_reg,
index 6da9784fe4a244bd4088817db862f2ef677a9ffa..4fada7ebe8d8233df7d38958ff0f352466f3da68 100644 (file)
@@ -423,7 +423,8 @@ static const struct intel_device_info ilk_m_info = {
        .has_coherent_ggtt = true, \
        .has_llc = 1, \
        .has_rc6 = 1, \
-       .has_rc6p = 1, \
+       /* snb does support rc6p, but enabling it causes various issues */ \
+       .has_rc6p = 0, \
        .has_rps = true, \
        .dma_mask_size = 40, \
        .__runtime.ppgtt_type = INTEL_PPGTT_ALIASING, \
@@ -1129,7 +1130,6 @@ static const struct intel_gt_definition xelpmp_extra_gt[] = {
        {}
 };
 
-__maybe_unused
 static const struct intel_device_info mtl_info = {
        XE_HP_FEATURES,
        XE_LPDP_FEATURES,
index 8e1892d147741c47dd4a25df76169c6e5441ee6b..916176872544927f95b76f45a6964b2cba9b63af 100644 (file)
 
 #define SHOTPLUG_CTL_DDI                               _MMIO(0xc4030)
 #define   SHOTPLUG_CTL_DDI_HPD_ENABLE(hpd_pin)                 (0x8 << (_HPD_PIN_DDI(hpd_pin) * 4))
+#define   SHOTPLUG_CTL_DDI_HPD_OUTPUT_DATA(hpd_pin)            (0x4 << (_HPD_PIN_DDI(hpd_pin) * 4))
 #define   SHOTPLUG_CTL_DDI_HPD_STATUS_MASK(hpd_pin)            (0x3 << (_HPD_PIN_DDI(hpd_pin) * 4))
 #define   SHOTPLUG_CTL_DDI_HPD_NO_DETECT(hpd_pin)              (0x0 << (_HPD_PIN_DDI(hpd_pin) * 4))
 #define   SHOTPLUG_CTL_DDI_HPD_SHORT_DETECT(hpd_pin)           (0x1 << (_HPD_PIN_DDI(hpd_pin) * 4))
index 23777d500cdf95972dcc1b3f55a91eac7f5c5982..f45bd6b6cede45ca405929a0840f1d66fe13c75d 100644 (file)
@@ -19,6 +19,10 @@ static void i915_switcheroo_set_state(struct pci_dev *pdev,
                dev_err(&pdev->dev, "DRM not initialized, aborting switch.\n");
                return;
        }
+       if (!HAS_DISPLAY(i915)) {
+               dev_err(&pdev->dev, "Device state not initialized, aborting switch.\n");
+               return;
+       }
 
        if (state == VGA_SWITCHEROO_ON) {
                drm_info(&i915->drm, "switched on\n");
@@ -44,7 +48,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
         * locking inversion with the driver load path. And the access here is
         * completely racy anyway. So don't bother with locking for now.
         */
-       return i915 && atomic_read(&i915->drm.open_count) == 0;
+       return i915 && HAS_DISPLAY(i915) && atomic_read(&i915->drm.open_count) == 0;
 }
 
 static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
index 703fee6b5f75221ba91957892170c67d8e6189ca..135390d975b6f13b1a4c449d457850bdb6da7490 100644 (file)
@@ -1566,7 +1566,7 @@ static int __i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
                         * locked objects when called from execbuf when pinning
                         * is removed. This would probably regress badly.
                         */
-                       i915_gem_evict_vm(vm, NULL);
+                       i915_gem_evict_vm(vm, NULL, NULL);
                        mutex_unlock(&vm->mutex);
                }
        } while (1);
@@ -2116,7 +2116,7 @@ int i915_vma_unbind_async(struct i915_vma *vma, bool trylock_vm)
        if (!obj->mm.rsgt)
                return -EBUSY;
 
-       err = dma_resv_reserve_fences(obj->base.resv, 1);
+       err = dma_resv_reserve_fences(obj->base.resv, 2);
        if (err)
                return -EBUSY;
 
index 8c6517d29b8e0c409626b9d3694995cd191b151e..37068542aafe7f6399d1b1222c643e8f17e0c6e8 100644 (file)
@@ -344,7 +344,7 @@ static int igt_evict_vm(void *arg)
 
        /* Everything is pinned, nothing should happen */
        mutex_lock(&ggtt->vm.mutex);
-       err = i915_gem_evict_vm(&ggtt->vm, NULL);
+       err = i915_gem_evict_vm(&ggtt->vm, NULL, NULL);
        mutex_unlock(&ggtt->vm.mutex);
        if (err) {
                pr_err("i915_gem_evict_vm on a full GGTT returned err=%d]\n",
@@ -356,7 +356,7 @@ static int igt_evict_vm(void *arg)
 
        for_i915_gem_ww(&ww, err, false) {
                mutex_lock(&ggtt->vm.mutex);
-               err = i915_gem_evict_vm(&ggtt->vm, &ww);
+               err = i915_gem_evict_vm(&ggtt->vm, &ww, NULL);
                mutex_unlock(&ggtt->vm.mutex);
        }
 
index dba4f7d81d69359e0553bdf41b9052cb35dd3ae9..80142d9a4a55218e1c43375c25d5543a07bbb708 100644 (file)
@@ -614,6 +614,11 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
                break;
        }
 
+       if (ipu_plane->dp_flow == IPU_DP_FLOW_SYNC_BG)
+               width = ipu_src_rect_width(new_state);
+       else
+               width = drm_rect_width(&new_state->src) >> 16;
+
        eba = drm_plane_state_to_eba(new_state, 0);
 
        /*
@@ -622,8 +627,7 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
         */
        if (ipu_state->use_pre) {
                axi_id = ipu_chan_assign_axi_id(ipu_plane->dma);
-               ipu_prg_channel_configure(ipu_plane->ipu_ch, axi_id,
-                                         ipu_src_rect_width(new_state),
+               ipu_prg_channel_configure(ipu_plane->ipu_ch, axi_id, width,
                                          drm_rect_height(&new_state->src) >> 16,
                                          fb->pitches[0], fb->format->format,
                                          fb->modifier, &eba);
@@ -678,9 +682,8 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
                break;
        }
 
-       ipu_dmfc_config_wait4eot(ipu_plane->dmfc, ALIGN(drm_rect_width(dst), 8));
+       ipu_dmfc_config_wait4eot(ipu_plane->dmfc, width);
 
-       width = ipu_src_rect_width(new_state);
        height = drm_rect_height(&new_state->src) >> 16;
        info = drm_format_info(fb->format->format);
        ipu_calculate_bursts(width, info->cpp[0], fb->pitches[0],
@@ -744,8 +747,7 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
                ipu_cpmem_set_burstsize(ipu_plane->ipu_ch, 16);
 
                ipu_cpmem_zero(ipu_plane->alpha_ch);
-               ipu_cpmem_set_resolution(ipu_plane->alpha_ch,
-                                        ipu_src_rect_width(new_state),
+               ipu_cpmem_set_resolution(ipu_plane->alpha_ch, width,
                                         drm_rect_height(&new_state->src) >> 16);
                ipu_cpmem_set_format_passthrough(ipu_plane->alpha_ch, 8);
                ipu_cpmem_set_high_priority(ipu_plane->alpha_ch);
index d4b907889a21d199a43f0af7a7a6bcf56ec201c8..cd399b0b7181499218a8f969c0d320be88fd93c4 100644 (file)
@@ -436,15 +436,14 @@ void meson_viu_init(struct meson_drm *priv)
 
        /* Initialize OSD1 fifo control register */
        reg = VIU_OSD_DDR_PRIORITY_URGENT |
-               VIU_OSD_HOLD_FIFO_LINES(31) |
                VIU_OSD_FIFO_DEPTH_VAL(32) | /* fifo_depth_val: 32*8=256 */
                VIU_OSD_WORDS_PER_BURST(4) | /* 4 words in 1 burst */
                VIU_OSD_FIFO_LIMITS(2);      /* fifo_lim: 2*16=32 */
 
        if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A))
-               reg |= VIU_OSD_BURST_LENGTH_32;
+               reg |= (VIU_OSD_BURST_LENGTH_32 | VIU_OSD_HOLD_FIFO_LINES(31));
        else
-               reg |= VIU_OSD_BURST_LENGTH_64;
+               reg |= (VIU_OSD_BURST_LENGTH_64 | VIU_OSD_HOLD_FIFO_LINES(4));
 
        writel_relaxed(reg, priv->io_base + _REG(VIU_OSD1_FIFO_CTRL_STAT));
        writel_relaxed(reg, priv->io_base + _REG(VIU_OSD2_FIFO_CTRL_STAT));
index 6484b97c5344f7085b40bdc059d7a1b256f40688..f3c9600221d48c0a0551e4ecf86b57b5f391f763 100644 (file)
@@ -876,7 +876,8 @@ static void a6xx_gmu_rpmh_off(struct a6xx_gmu *gmu)
 #define GBIF_CLIENT_HALT_MASK             BIT(0)
 #define GBIF_ARB_HALT_MASK                BIT(1)
 
-static void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu)
+static void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu,
+               bool gx_off)
 {
        struct msm_gpu *gpu = &adreno_gpu->base;
 
@@ -889,9 +890,11 @@ static void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu)
                return;
        }
 
-       /* Halt the gx side of GBIF */
-       gpu_write(gpu, REG_A6XX_RBBM_GBIF_HALT, 1);
-       spin_until(gpu_read(gpu, REG_A6XX_RBBM_GBIF_HALT_ACK) & 1);
+       if (gx_off) {
+               /* Halt the gx side of GBIF */
+               gpu_write(gpu, REG_A6XX_RBBM_GBIF_HALT, 1);
+               spin_until(gpu_read(gpu, REG_A6XX_RBBM_GBIF_HALT_ACK) & 1);
+       }
 
        /* Halt new client requests on GBIF */
        gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_CLIENT_HALT_MASK);
@@ -929,7 +932,7 @@ static void a6xx_gmu_force_off(struct a6xx_gmu *gmu)
        /* Halt the gmu cm3 core */
        gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 1);
 
-       a6xx_bus_clear_pending_transactions(adreno_gpu);
+       a6xx_bus_clear_pending_transactions(adreno_gpu, true);
 
        /* Reset GPU core blocks */
        gpu_write(gpu, REG_A6XX_RBBM_SW_RESET_CMD, 1);
@@ -1083,7 +1086,7 @@ static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu)
                        return;
                }
 
-               a6xx_bus_clear_pending_transactions(adreno_gpu);
+               a6xx_bus_clear_pending_transactions(adreno_gpu, a6xx_gpu->hung);
 
                /* tell the GMU we want to slumber */
                ret = a6xx_gmu_notify_slumber(gmu);
index 36c8fb699b56be46193c3a04c41868b858df7e70..3be0f2928b57c1b2c9a0687fde2ad7a5e399c1e1 100644 (file)
@@ -1270,6 +1270,12 @@ static void a6xx_recover(struct msm_gpu *gpu)
        if (hang_debug)
                a6xx_dump(gpu);
 
+       /*
+        * To handle recovery specific sequences during the rpm suspend we are
+        * about to trigger
+        */
+       a6xx_gpu->hung = true;
+
        /* Halt SQE first */
        gpu_write(gpu, REG_A6XX_CP_SQE_CNTL, 3);
 
@@ -1312,6 +1318,7 @@ static void a6xx_recover(struct msm_gpu *gpu)
        mutex_unlock(&gpu->active_lock);
 
        msm_gpu_hw_init(gpu);
+       a6xx_gpu->hung = false;
 }
 
 static const char *a6xx_uche_fault_block(struct msm_gpu *gpu, u32 mid)
index ab853f61db632256db515c9719e473d6b09718a2..eea2e60ce3b7bb3baf94bffbfd40a34a90e4b669 100644 (file)
@@ -32,6 +32,7 @@ struct a6xx_gpu {
        void *llc_slice;
        void *htw_llc_slice;
        bool have_mmu500;
+       bool hung;
 };
 
 #define to_a6xx_gpu(x) container_of(x, struct a6xx_gpu, base)
index 628806423f7d2d162c4ffd6f1b34d61f1f94c1f0..36f062c7582f9a8ec7b7b54c6a3e8abb1804252a 100644 (file)
@@ -551,13 +551,14 @@ static int adreno_bind(struct device *dev, struct device *master, void *data)
        return 0;
 }
 
+static int adreno_system_suspend(struct device *dev);
 static void adreno_unbind(struct device *dev, struct device *master,
                void *data)
 {
        struct msm_drm_private *priv = dev_get_drvdata(master);
        struct msm_gpu *gpu = dev_to_gpu(dev);
 
-       pm_runtime_force_suspend(dev);
+       WARN_ON_ONCE(adreno_system_suspend(dev));
        gpu->funcs->destroy(gpu);
 
        priv->gpu_pdev = NULL;
@@ -609,7 +610,7 @@ static int adreno_remove(struct platform_device *pdev)
 
 static void adreno_shutdown(struct platform_device *pdev)
 {
-       pm_runtime_force_suspend(&pdev->dev);
+       WARN_ON_ONCE(adreno_system_suspend(&pdev->dev));
 }
 
 static const struct of_device_id dt_match[] = {
index 57586c794b84bde8c012594e1422a66dc5021e3d..3605f095b2de24bf20640a5b5efecba65b196b14 100644 (file)
@@ -352,6 +352,8 @@ int adreno_set_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
                /* Ensure string is null terminated: */
                str[len] = '\0';
 
+               mutex_lock(&gpu->lock);
+
                if (param == MSM_PARAM_COMM) {
                        paramp = &ctx->comm;
                } else {
@@ -361,6 +363,8 @@ int adreno_set_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
                kfree(*paramp);
                *paramp = str;
 
+               mutex_unlock(&gpu->lock);
+
                return 0;
        }
        case MSM_PARAM_SYSPROF:
index 5d4b1c95033ff5f9fde0ffc63316c9fb5453850c..b4f9b1343d6379c939bd3fe49b3044207774295b 100644 (file)
@@ -29,11 +29,9 @@ enum {
        ADRENO_FW_MAX,
 };
 
-enum adreno_quirks {
-       ADRENO_QUIRK_TWO_PASS_USE_WFI = 1,
-       ADRENO_QUIRK_FAULT_DETECT_MASK = 2,
-       ADRENO_QUIRK_LMLOADKILL_DISABLE = 3,
-};
+#define ADRENO_QUIRK_TWO_PASS_USE_WFI          BIT(0)
+#define ADRENO_QUIRK_FAULT_DETECT_MASK         BIT(1)
+#define ADRENO_QUIRK_LMLOADKILL_DISABLE                BIT(2)
 
 struct adreno_rev {
        uint8_t  core;
@@ -65,7 +63,7 @@ struct adreno_info {
        const char *name;
        const char *fw[ADRENO_FW_MAX];
        uint32_t gmem;
-       enum adreno_quirks quirks;
+       u64 quirks;
        struct msm_gpu *(*init)(struct drm_device *dev);
        const char *zapfw;
        u32 inactive_period;
index 7cbcef6efe17169220342e897c69a8a118c82c25..62f6ff6abf4106a6cd5a0a5db61fc5f56a379c4f 100644 (file)
@@ -132,7 +132,6 @@ static void dpu_encoder_phys_wb_set_qos(struct dpu_encoder_phys *phys_enc)
  * dpu_encoder_phys_wb_setup_fb - setup output framebuffer
  * @phys_enc:  Pointer to physical encoder
  * @fb:                Pointer to output framebuffer
- * @wb_roi:    Pointer to output region of interest
  */
 static void dpu_encoder_phys_wb_setup_fb(struct dpu_encoder_phys *phys_enc,
                struct drm_framebuffer *fb)
@@ -692,7 +691,7 @@ static void dpu_encoder_phys_wb_init_ops(struct dpu_encoder_phys_ops *ops)
 
 /**
  * dpu_encoder_phys_wb_init - initialize writeback encoder
- * @init:      Pointer to init info structure with initialization params
+ * @p: Pointer to init info structure with initialization params
  */
 struct dpu_encoder_phys *dpu_encoder_phys_wb_init(
                struct dpu_enc_phys_init_params *p)
index d030a93a08c361b203aa7111dff83d47d85694f9..cc3efed593aa196e0df506be4668c9e24fb16802 100644 (file)
@@ -423,6 +423,10 @@ void dp_aux_isr(struct drm_dp_aux *dp_aux)
 
        isr = dp_catalog_aux_get_irq(aux->catalog);
 
+       /* no interrupts pending, return immediately */
+       if (!isr)
+               return;
+
        if (!aux->cmd_busy)
                return;
 
index 4d3fdc806befda16e4b46ac94d4b1a7aca90518a..97372bb241d897b2880f4e2b0b6e0413820a4cb3 100644 (file)
@@ -532,11 +532,19 @@ static int msm_hdmi_dev_probe(struct platform_device *pdev)
 
        ret = devm_pm_runtime_enable(&pdev->dev);
        if (ret)
-               return ret;
+               goto err_put_phy;
 
        platform_set_drvdata(pdev, hdmi);
 
-       return component_add(&pdev->dev, &msm_hdmi_ops);
+       ret = component_add(&pdev->dev, &msm_hdmi_ops);
+       if (ret)
+               goto err_put_phy;
+
+       return 0;
+
+err_put_phy:
+       msm_hdmi_put_phy(hdmi);
+       return ret;
 }
 
 static int msm_hdmi_dev_remove(struct platform_device *pdev)
index 8b0b0ac74a6f1d140143f785887868533d39c434..45e81eb148a8d0bbc7d4f281294960989f473bcf 100644 (file)
@@ -1278,7 +1278,7 @@ void msm_drv_shutdown(struct platform_device *pdev)
         * msm_drm_init, drm_dev->registered is used as an indicator that the
         * shutdown will be successful.
         */
-       if (drm && drm->registered)
+       if (drm && drm->registered && priv->kms)
                drm_atomic_helper_shutdown(drm);
 }
 
index 30ed45af76ade1de740a1f3bfcc7f00bd09b619e..3802495003258a1258650a2c73c1a0572283dfc6 100644 (file)
@@ -335,6 +335,8 @@ static void get_comm_cmdline(struct msm_gem_submit *submit, char **comm, char **
        struct msm_file_private *ctx = submit->queue->ctx;
        struct task_struct *task;
 
+       WARN_ON(!mutex_is_locked(&submit->gpu->lock));
+
        /* Note that kstrdup will return NULL if argument is NULL: */
        *comm = kstrdup(ctx->comm, GFP_KERNEL);
        *cmd  = kstrdup(ctx->cmdline, GFP_KERNEL);
index 651786bc55e5a8a989a882840ac62857f1f41017..732295e2568345f1fc5a6d4e62da647e186ce104 100644 (file)
@@ -376,10 +376,18 @@ struct msm_file_private {
         */
        int sysprof;
 
-       /** comm: Overridden task comm, see MSM_PARAM_COMM */
+       /**
+        * comm: Overridden task comm, see MSM_PARAM_COMM
+        *
+        * Accessed under msm_gpu::lock
+        */
        char *comm;
 
-       /** cmdline: Overridden task cmdline, see MSM_PARAM_CMDLINE */
+       /**
+        * cmdline: Overridden task cmdline, see MSM_PARAM_CMDLINE
+        *
+        * Accessed under msm_gpu::lock
+        */
        char *cmdline;
 
        /**
index 86b28add1ffff4f32eba624b34a761479fe87811..2527afef9c199b5df6c2d418ea5a1b13dd0b2b58 100644 (file)
@@ -47,15 +47,17 @@ struct msm_mdss {
 static int msm_mdss_parse_data_bus_icc_path(struct device *dev,
                                            struct msm_mdss *msm_mdss)
 {
-       struct icc_path *path0 = of_icc_get(dev, "mdp0-mem");
-       struct icc_path *path1 = of_icc_get(dev, "mdp1-mem");
+       struct icc_path *path0;
+       struct icc_path *path1;
 
+       path0 = of_icc_get(dev, "mdp0-mem");
        if (IS_ERR_OR_NULL(path0))
                return PTR_ERR_OR_ZERO(path0);
 
        msm_mdss->path[0] = path0;
        msm_mdss->num_paths = 1;
 
+       path1 = of_icc_get(dev, "mdp1-mem");
        if (!IS_ERR_OR_NULL(path1)) {
                msm_mdss->path[1] = path1;
                msm_mdss->num_paths++;
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
deleted file mode 100644 (file)
index e87de79..0000000
+++ /dev/null
@@ -1,613 +0,0 @@
-/*
- * Copyright © 2007 David Airlie
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- *     David Airlie
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/tty.h>
-#include <linux/sysrq.h>
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/screen_info.h>
-#include <linux/vga_switcheroo.h>
-#include <linux/console.h>
-
-#include <drm/drm_crtc.h>
-#include <drm/drm_crtc_helper.h>
-#include <drm/drm_probe_helper.h>
-#include <drm/drm_fb_helper.h>
-#include <drm/drm_fourcc.h>
-#include <drm/drm_atomic.h>
-
-#include "nouveau_drv.h"
-#include "nouveau_gem.h"
-#include "nouveau_bo.h"
-#include "nouveau_fbcon.h"
-#include "nouveau_chan.h"
-#include "nouveau_vmm.h"
-
-#include "nouveau_crtc.h"
-
-MODULE_PARM_DESC(nofbaccel, "Disable fbcon acceleration");
-int nouveau_nofbaccel = 0;
-module_param_named(nofbaccel, nouveau_nofbaccel, int, 0400);
-
-MODULE_PARM_DESC(fbcon_bpp, "fbcon bits-per-pixel (default: auto)");
-static int nouveau_fbcon_bpp;
-module_param_named(fbcon_bpp, nouveau_fbcon_bpp, int, 0400);
-
-static void
-nouveau_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
-{
-       struct nouveau_fbdev *fbcon = info->par;
-       struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev);
-       struct nvif_device *device = &drm->client.device;
-       int ret;
-
-       if (info->state != FBINFO_STATE_RUNNING)
-               return;
-
-       ret = -ENODEV;
-       if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) &&
-           mutex_trylock(&drm->client.mutex)) {
-               if (device->info.family < NV_DEVICE_INFO_V0_TESLA)
-                       ret = nv04_fbcon_fillrect(info, rect);
-               else
-               if (device->info.family < NV_DEVICE_INFO_V0_FERMI)
-                       ret = nv50_fbcon_fillrect(info, rect);
-               else
-                       ret = nvc0_fbcon_fillrect(info, rect);
-               mutex_unlock(&drm->client.mutex);
-       }
-
-       if (ret == 0)
-               return;
-
-       if (ret != -ENODEV)
-               nouveau_fbcon_gpu_lockup(info);
-       drm_fb_helper_cfb_fillrect(info, rect);
-}
-
-static void
-nouveau_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *image)
-{
-       struct nouveau_fbdev *fbcon = info->par;
-       struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev);
-       struct nvif_device *device = &drm->client.device;
-       int ret;
-
-       if (info->state != FBINFO_STATE_RUNNING)
-               return;
-
-       ret = -ENODEV;
-       if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) &&
-           mutex_trylock(&drm->client.mutex)) {
-               if (device->info.family < NV_DEVICE_INFO_V0_TESLA)
-                       ret = nv04_fbcon_copyarea(info, image);
-               else
-               if (device->info.family < NV_DEVICE_INFO_V0_FERMI)
-                       ret = nv50_fbcon_copyarea(info, image);
-               else
-                       ret = nvc0_fbcon_copyarea(info, image);
-               mutex_unlock(&drm->client.mutex);
-       }
-
-       if (ret == 0)
-               return;
-
-       if (ret != -ENODEV)
-               nouveau_fbcon_gpu_lockup(info);
-       drm_fb_helper_cfb_copyarea(info, image);
-}
-
-static void
-nouveau_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
-{
-       struct nouveau_fbdev *fbcon = info->par;
-       struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev);
-       struct nvif_device *device = &drm->client.device;
-       int ret;
-
-       if (info->state != FBINFO_STATE_RUNNING)
-               return;
-
-       ret = -ENODEV;
-       if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) &&
-           mutex_trylock(&drm->client.mutex)) {
-               if (device->info.family < NV_DEVICE_INFO_V0_TESLA)
-                       ret = nv04_fbcon_imageblit(info, image);
-               else
-               if (device->info.family < NV_DEVICE_INFO_V0_FERMI)
-                       ret = nv50_fbcon_imageblit(info, image);
-               else
-                       ret = nvc0_fbcon_imageblit(info, image);
-               mutex_unlock(&drm->client.mutex);
-       }
-
-       if (ret == 0)
-               return;
-
-       if (ret != -ENODEV)
-               nouveau_fbcon_gpu_lockup(info);
-       drm_fb_helper_cfb_imageblit(info, image);
-}
-
-static int
-nouveau_fbcon_sync(struct fb_info *info)
-{
-       struct nouveau_fbdev *fbcon = info->par;
-       struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev);
-       struct nouveau_channel *chan = drm->channel;
-       int ret;
-
-       if (!chan || !chan->accel_done || in_interrupt() ||
-           info->state != FBINFO_STATE_RUNNING ||
-           info->flags & FBINFO_HWACCEL_DISABLED)
-               return 0;
-
-       if (!mutex_trylock(&drm->client.mutex))
-               return 0;
-
-       ret = nouveau_channel_idle(chan);
-       mutex_unlock(&drm->client.mutex);
-       if (ret) {
-               nouveau_fbcon_gpu_lockup(info);
-               return 0;
-       }
-
-       chan->accel_done = false;
-       return 0;
-}
-
-static int
-nouveau_fbcon_open(struct fb_info *info, int user)
-{
-       struct nouveau_fbdev *fbcon = info->par;
-       struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev);
-       int ret = pm_runtime_get_sync(drm->dev->dev);
-       if (ret < 0 && ret != -EACCES) {
-               pm_runtime_put(drm->dev->dev);
-               return ret;
-       }
-       return 0;
-}
-
-static int
-nouveau_fbcon_release(struct fb_info *info, int user)
-{
-       struct nouveau_fbdev *fbcon = info->par;
-       struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev);
-       pm_runtime_put(drm->dev->dev);
-       return 0;
-}
-
-static const struct fb_ops nouveau_fbcon_ops = {
-       .owner = THIS_MODULE,
-       DRM_FB_HELPER_DEFAULT_OPS,
-       .fb_open = nouveau_fbcon_open,
-       .fb_release = nouveau_fbcon_release,
-       .fb_fillrect = nouveau_fbcon_fillrect,
-       .fb_copyarea = nouveau_fbcon_copyarea,
-       .fb_imageblit = nouveau_fbcon_imageblit,
-       .fb_sync = nouveau_fbcon_sync,
-};
-
-static const struct fb_ops nouveau_fbcon_sw_ops = {
-       .owner = THIS_MODULE,
-       DRM_FB_HELPER_DEFAULT_OPS,
-       .fb_open = nouveau_fbcon_open,
-       .fb_release = nouveau_fbcon_release,
-       .fb_fillrect = drm_fb_helper_cfb_fillrect,
-       .fb_copyarea = drm_fb_helper_cfb_copyarea,
-       .fb_imageblit = drm_fb_helper_cfb_imageblit,
-};
-
-void
-nouveau_fbcon_accel_save_disable(struct drm_device *dev)
-{
-       struct nouveau_drm *drm = nouveau_drm(dev);
-       if (drm->fbcon && drm->fbcon->helper.info) {
-               drm->fbcon->saved_flags = drm->fbcon->helper.info->flags;
-               drm->fbcon->helper.info->flags |= FBINFO_HWACCEL_DISABLED;
-       }
-}
-
-void
-nouveau_fbcon_accel_restore(struct drm_device *dev)
-{
-       struct nouveau_drm *drm = nouveau_drm(dev);
-       if (drm->fbcon && drm->fbcon->helper.info)
-               drm->fbcon->helper.info->flags = drm->fbcon->saved_flags;
-}
-
-static void
-nouveau_fbcon_accel_fini(struct drm_device *dev)
-{
-       struct nouveau_drm *drm = nouveau_drm(dev);
-       struct nouveau_fbdev *fbcon = drm->fbcon;
-       if (fbcon && drm->channel) {
-               console_lock();
-               if (fbcon->helper.info)
-                       fbcon->helper.info->flags |= FBINFO_HWACCEL_DISABLED;
-               console_unlock();
-               nouveau_channel_idle(drm->channel);
-               nvif_object_dtor(&fbcon->twod);
-               nvif_object_dtor(&fbcon->blit);
-               nvif_object_dtor(&fbcon->gdi);
-               nvif_object_dtor(&fbcon->patt);
-               nvif_object_dtor(&fbcon->rop);
-               nvif_object_dtor(&fbcon->clip);
-               nvif_object_dtor(&fbcon->surf2d);
-       }
-}
-
-static void
-nouveau_fbcon_accel_init(struct drm_device *dev)
-{
-       struct nouveau_drm *drm = nouveau_drm(dev);
-       struct nouveau_fbdev *fbcon = drm->fbcon;
-       struct fb_info *info = fbcon->helper.info;
-       int ret;
-
-       if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA)
-               ret = nv04_fbcon_accel_init(info);
-       else
-       if (drm->client.device.info.family < NV_DEVICE_INFO_V0_FERMI)
-               ret = nv50_fbcon_accel_init(info);
-       else
-               ret = nvc0_fbcon_accel_init(info);
-
-       if (ret == 0)
-               info->fbops = &nouveau_fbcon_ops;
-}
-
-static void
-nouveau_fbcon_zfill(struct drm_device *dev, struct nouveau_fbdev *fbcon)
-{
-       struct fb_info *info = fbcon->helper.info;
-       struct fb_fillrect rect;
-
-       /* Clear the entire fbcon.  The drm will program every connector
-        * with it's preferred mode.  If the sizes differ, one display will
-        * quite likely have garbage around the console.
-        */
-       rect.dx = rect.dy = 0;
-       rect.width = info->var.xres_virtual;
-       rect.height = info->var.yres_virtual;
-       rect.color = 0;
-       rect.rop = ROP_COPY;
-       info->fbops->fb_fillrect(info, &rect);
-}
-
-static int
-nouveau_fbcon_create(struct drm_fb_helper *helper,
-                    struct drm_fb_helper_surface_size *sizes)
-{
-       struct nouveau_fbdev *fbcon =
-               container_of(helper, struct nouveau_fbdev, helper);
-       struct drm_device *dev = fbcon->helper.dev;
-       struct nouveau_drm *drm = nouveau_drm(dev);
-       struct nvif_device *device = &drm->client.device;
-       struct fb_info *info;
-       struct drm_framebuffer *fb;
-       struct nouveau_channel *chan;
-       struct nouveau_bo *nvbo;
-       struct drm_mode_fb_cmd2 mode_cmd = {};
-       int ret;
-
-       mode_cmd.width = sizes->surface_width;
-       mode_cmd.height = sizes->surface_height;
-
-       mode_cmd.pitches[0] = mode_cmd.width * (sizes->surface_bpp >> 3);
-       mode_cmd.pitches[0] = roundup(mode_cmd.pitches[0], 256);
-
-       mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
-                                                         sizes->surface_depth);
-
-       ret = nouveau_gem_new(&drm->client, mode_cmd.pitches[0] *
-                             mode_cmd.height, 0, NOUVEAU_GEM_DOMAIN_VRAM,
-                             0, 0x0000, &nvbo);
-       if (ret) {
-               NV_ERROR(drm, "failed to allocate framebuffer\n");
-               goto out;
-       }
-
-       ret = nouveau_framebuffer_new(dev, &mode_cmd, &nvbo->bo.base, &fb);
-       if (ret)
-               goto out_unref;
-
-       ret = nouveau_bo_pin(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, false);
-       if (ret) {
-               NV_ERROR(drm, "failed to pin fb: %d\n", ret);
-               goto out_unref;
-       }
-
-       ret = nouveau_bo_map(nvbo);
-       if (ret) {
-               NV_ERROR(drm, "failed to map fb: %d\n", ret);
-               goto out_unpin;
-       }
-
-       chan = nouveau_nofbaccel ? NULL : drm->channel;
-       if (chan && device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
-               ret = nouveau_vma_new(nvbo, chan->vmm, &fbcon->vma);
-               if (ret) {
-                       NV_ERROR(drm, "failed to map fb into chan: %d\n", ret);
-                       chan = NULL;
-               }
-       }
-
-       info = drm_fb_helper_alloc_info(helper);
-       if (IS_ERR(info)) {
-               ret = PTR_ERR(info);
-               goto out_unlock;
-       }
-
-       /* setup helper */
-       fbcon->helper.fb = fb;
-
-       if (!chan)
-               info->flags = FBINFO_HWACCEL_DISABLED;
-       else
-               info->flags = FBINFO_HWACCEL_COPYAREA |
-                             FBINFO_HWACCEL_FILLRECT |
-                             FBINFO_HWACCEL_IMAGEBLIT;
-       info->fbops = &nouveau_fbcon_sw_ops;
-       info->fix.smem_start = nvbo->bo.resource->bus.offset;
-       info->fix.smem_len = nvbo->bo.base.size;
-
-       info->screen_base = nvbo_kmap_obj_iovirtual(nvbo);
-       info->screen_size = nvbo->bo.base.size;
-
-       drm_fb_helper_fill_info(info, &fbcon->helper, sizes);
-
-       /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
-
-       if (chan)
-               nouveau_fbcon_accel_init(dev);
-       nouveau_fbcon_zfill(dev, fbcon);
-
-       /* To allow resizeing without swapping buffers */
-       NV_INFO(drm, "allocated %dx%d fb: 0x%llx, bo %p\n",
-               fb->width, fb->height, nvbo->offset, nvbo);
-
-       if (dev_is_pci(dev->dev))
-               vga_switcheroo_client_fb_set(to_pci_dev(dev->dev), info);
-
-       return 0;
-
-out_unlock:
-       if (chan)
-               nouveau_vma_del(&fbcon->vma);
-       nouveau_bo_unmap(nvbo);
-out_unpin:
-       nouveau_bo_unpin(nvbo);
-out_unref:
-       nouveau_bo_ref(NULL, &nvbo);
-out:
-       return ret;
-}
-
-static int
-nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *fbcon)
-{
-       struct drm_framebuffer *fb = fbcon->helper.fb;
-       struct nouveau_bo *nvbo;
-
-       drm_fb_helper_unregister_info(&fbcon->helper);
-       drm_fb_helper_fini(&fbcon->helper);
-
-       if (fb && fb->obj[0]) {
-               nvbo = nouveau_gem_object(fb->obj[0]);
-               nouveau_vma_del(&fbcon->vma);
-               nouveau_bo_unmap(nvbo);
-               nouveau_bo_unpin(nvbo);
-               drm_framebuffer_put(fb);
-       }
-
-       return 0;
-}
-
-void nouveau_fbcon_gpu_lockup(struct fb_info *info)
-{
-       struct nouveau_fbdev *fbcon = info->par;
-       struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev);
-
-       NV_ERROR(drm, "GPU lockup - switching to software fbcon\n");
-       info->flags |= FBINFO_HWACCEL_DISABLED;
-}
-
-static const struct drm_fb_helper_funcs nouveau_fbcon_helper_funcs = {
-       .fb_probe = nouveau_fbcon_create,
-};
-
-static void
-nouveau_fbcon_set_suspend_work(struct work_struct *work)
-{
-       struct nouveau_drm *drm = container_of(work, typeof(*drm), fbcon_work);
-       int state = READ_ONCE(drm->fbcon_new_state);
-
-       if (state == FBINFO_STATE_RUNNING)
-               pm_runtime_get_sync(drm->dev->dev);
-
-       console_lock();
-       if (state == FBINFO_STATE_RUNNING)
-               nouveau_fbcon_accel_restore(drm->dev);
-       drm_fb_helper_set_suspend(&drm->fbcon->helper, state);
-       if (state != FBINFO_STATE_RUNNING)
-               nouveau_fbcon_accel_save_disable(drm->dev);
-       console_unlock();
-
-       if (state == FBINFO_STATE_RUNNING) {
-               nouveau_fbcon_hotplug_resume(drm->fbcon);
-               pm_runtime_mark_last_busy(drm->dev->dev);
-               pm_runtime_put_autosuspend(drm->dev->dev);
-       }
-}
-
-void
-nouveau_fbcon_set_suspend(struct drm_device *dev, int state)
-{
-       struct nouveau_drm *drm = nouveau_drm(dev);
-
-       if (!drm->fbcon)
-               return;
-
-       drm->fbcon_new_state = state;
-       /* Since runtime resume can happen as a result of a sysfs operation,
-        * it's possible we already have the console locked. So handle fbcon
-        * init/deinit from a seperate work thread
-        */
-       schedule_work(&drm->fbcon_work);
-}
-
-void
-nouveau_fbcon_output_poll_changed(struct drm_device *dev)
-{
-       struct nouveau_drm *drm = nouveau_drm(dev);
-       struct nouveau_fbdev *fbcon = drm->fbcon;
-       int ret;
-
-       if (!fbcon)
-               return;
-
-       mutex_lock(&fbcon->hotplug_lock);
-
-       ret = pm_runtime_get(dev->dev);
-       if (ret == 1 || ret == -EACCES) {
-               drm_fb_helper_hotplug_event(&fbcon->helper);
-
-               pm_runtime_mark_last_busy(dev->dev);
-               pm_runtime_put_autosuspend(dev->dev);
-       } else if (ret == 0) {
-               /* If the GPU was already in the process of suspending before
-                * this event happened, then we can't block here as we'll
-                * deadlock the runtime pmops since they wait for us to
-                * finish. So, just defer this event for when we runtime
-                * resume again. It will be handled by fbcon_work.
-                */
-               NV_DEBUG(drm, "fbcon HPD event deferred until runtime resume\n");
-               fbcon->hotplug_waiting = true;
-               pm_runtime_put_noidle(drm->dev->dev);
-       } else {
-               DRM_WARN("fbcon HPD event lost due to RPM failure: %d\n",
-                        ret);
-       }
-
-       mutex_unlock(&fbcon->hotplug_lock);
-}
-
-void
-nouveau_fbcon_hotplug_resume(struct nouveau_fbdev *fbcon)
-{
-       struct nouveau_drm *drm;
-
-       if (!fbcon)
-               return;
-       drm = nouveau_drm(fbcon->helper.dev);
-
-       mutex_lock(&fbcon->hotplug_lock);
-       if (fbcon->hotplug_waiting) {
-               fbcon->hotplug_waiting = false;
-
-               NV_DEBUG(drm, "Handling deferred fbcon HPD events\n");
-               drm_fb_helper_hotplug_event(&fbcon->helper);
-       }
-       mutex_unlock(&fbcon->hotplug_lock);
-}
-
-int
-nouveau_fbcon_init(struct drm_device *dev)
-{
-       struct nouveau_drm *drm = nouveau_drm(dev);
-       struct nouveau_fbdev *fbcon;
-       int preferred_bpp = nouveau_fbcon_bpp;
-       int ret;
-
-       if (!dev->mode_config.num_crtc ||
-           (to_pci_dev(dev->dev)->class >> 8) != PCI_CLASS_DISPLAY_VGA)
-               return 0;
-
-       fbcon = kzalloc(sizeof(struct nouveau_fbdev), GFP_KERNEL);
-       if (!fbcon)
-               return -ENOMEM;
-
-       drm->fbcon = fbcon;
-       INIT_WORK(&drm->fbcon_work, nouveau_fbcon_set_suspend_work);
-       mutex_init(&fbcon->hotplug_lock);
-
-       drm_fb_helper_prepare(dev, &fbcon->helper, &nouveau_fbcon_helper_funcs);
-
-       ret = drm_fb_helper_init(dev, &fbcon->helper);
-       if (ret)
-               goto free;
-
-       if (preferred_bpp != 8 && preferred_bpp != 16 && preferred_bpp != 32) {
-               if (drm->client.device.info.ram_size <= 32 * 1024 * 1024)
-                       preferred_bpp = 8;
-               else
-               if (drm->client.device.info.ram_size <= 64 * 1024 * 1024)
-                       preferred_bpp = 16;
-               else
-                       preferred_bpp = 32;
-       }
-
-       /* disable all the possible outputs/crtcs before entering KMS mode */
-       if (!drm_drv_uses_atomic_modeset(dev))
-               drm_helper_disable_unused_functions(dev);
-
-       ret = drm_fb_helper_initial_config(&fbcon->helper, preferred_bpp);
-       if (ret)
-               goto fini;
-
-       if (fbcon->helper.info)
-               fbcon->helper.info->pixmap.buf_align = 4;
-       return 0;
-
-fini:
-       drm_fb_helper_fini(&fbcon->helper);
-free:
-       kfree(fbcon);
-       drm->fbcon = NULL;
-       return ret;
-}
-
-void
-nouveau_fbcon_fini(struct drm_device *dev)
-{
-       struct nouveau_drm *drm = nouveau_drm(dev);
-
-       if (!drm->fbcon)
-               return;
-
-       drm_kms_helper_poll_fini(dev);
-       nouveau_fbcon_accel_fini(dev);
-       nouveau_fbcon_destroy(dev, drm->fbcon);
-       kfree(drm->fbcon);
-       drm->fbcon = NULL;
-}
index 079600328be18776adee49954d8d4705fca7296a..e6403a9d66adec323e3698aee16a8649ae51248b 100644 (file)
@@ -3,7 +3,8 @@
 config DRM_PANFROST
        tristate "Panfrost (DRM support for ARM Mali Midgard/Bifrost GPUs)"
        depends on DRM
-       depends on ARM || ARM64 || (COMPILE_TEST && !GENERIC_ATOMIC64)
+       depends on ARM || ARM64 || COMPILE_TEST
+       depends on !GENERIC_ATOMIC64    # for IOMMU_IO_PGTABLE_LPAE
        depends on MMU
        select DRM_SCHED
        select IOMMU_SUPPORT
index 2fa5afe21288929e2a479c4f61f94a441a253cee..919e6cc0498281ff8c4626f3c7998e49511f47d3 100644 (file)
@@ -82,6 +82,7 @@ static int panfrost_ioctl_create_bo(struct drm_device *dev, void *data,
        struct panfrost_gem_object *bo;
        struct drm_panfrost_create_bo *args = data;
        struct panfrost_gem_mapping *mapping;
+       int ret;
 
        if (!args->size || args->pad ||
            (args->flags & ~(PANFROST_BO_NOEXEC | PANFROST_BO_HEAP)))
@@ -92,21 +93,29 @@ static int panfrost_ioctl_create_bo(struct drm_device *dev, void *data,
            !(args->flags & PANFROST_BO_NOEXEC))
                return -EINVAL;
 
-       bo = panfrost_gem_create_with_handle(file, dev, args->size, args->flags,
-                                            &args->handle);
+       bo = panfrost_gem_create(dev, args->size, args->flags);
        if (IS_ERR(bo))
                return PTR_ERR(bo);
 
+       ret = drm_gem_handle_create(file, &bo->base.base, &args->handle);
+       if (ret)
+               goto out;
+
        mapping = panfrost_gem_mapping_get(bo, priv);
-       if (!mapping) {
-               drm_gem_object_put(&bo->base.base);
-               return -EINVAL;
+       if (mapping) {
+               args->offset = mapping->mmnode.start << PAGE_SHIFT;
+               panfrost_gem_mapping_put(mapping);
+       } else {
+               /* This can only happen if the handle from
+                * drm_gem_handle_create() has already been guessed and freed
+                * by user space
+                */
+               ret = -EINVAL;
        }
 
-       args->offset = mapping->mmnode.start << PAGE_SHIFT;
-       panfrost_gem_mapping_put(mapping);
-
-       return 0;
+out:
+       drm_gem_object_put(&bo->base.base);
+       return ret;
 }
 
 /**
index 293e799e2fe8144e0cb4ad0a5ca3f86822981708..3c812fbd126fd0f6b951eff0f0873a32e104722f 100644 (file)
@@ -235,12 +235,8 @@ struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t
 }
 
 struct panfrost_gem_object *
-panfrost_gem_create_with_handle(struct drm_file *file_priv,
-                               struct drm_device *dev, size_t size,
-                               u32 flags,
-                               uint32_t *handle)
+panfrost_gem_create(struct drm_device *dev, size_t size, u32 flags)
 {
-       int ret;
        struct drm_gem_shmem_object *shmem;
        struct panfrost_gem_object *bo;
 
@@ -256,16 +252,6 @@ panfrost_gem_create_with_handle(struct drm_file *file_priv,
        bo->noexec = !!(flags & PANFROST_BO_NOEXEC);
        bo->is_heap = !!(flags & PANFROST_BO_HEAP);
 
-       /*
-        * Allocate an id of idr table where the obj is registered
-        * and handle has the id what user can see.
-        */
-       ret = drm_gem_handle_create(file_priv, &shmem->base, handle);
-       /* drop reference from allocate - handle holds it now. */
-       drm_gem_object_put(&shmem->base);
-       if (ret)
-               return ERR_PTR(ret);
-
        return bo;
 }
 
index 8088d5fd8480e809f0a50f61ef3b09c4f45388e4..ad2877eeeccdfc4dcce337b3e3600b9dc0ce3fcc 100644 (file)
@@ -69,10 +69,7 @@ panfrost_gem_prime_import_sg_table(struct drm_device *dev,
                                   struct sg_table *sgt);
 
 struct panfrost_gem_object *
-panfrost_gem_create_with_handle(struct drm_file *file_priv,
-                               struct drm_device *dev, size_t size,
-                               u32 flags,
-                               uint32_t *handle);
+panfrost_gem_create(struct drm_device *dev, size_t size, u32 flags);
 
 int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv);
 void panfrost_gem_close(struct drm_gem_object *obj,
index fe09e5be79bddceb4cb0a988ed52f724a4a0bad8..15d04a0ec623469d6bd89c8460a335b9b22cfd8f 100644 (file)
@@ -81,7 +81,7 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
        init_completion(&entity->entity_idle);
 
        /* We start in an idle state. */
-       complete(&entity->entity_idle);
+       complete_all(&entity->entity_idle);
 
        spin_lock_init(&entity->rq_lock);
        spsc_queue_init(&entity->job_queue);
index 31f3a1267be44fbf87562b2d0c1b54398b005feb..fd22d753b4ed0ca40ade22549b042f767e1bab30 100644 (file)
@@ -987,7 +987,7 @@ static int drm_sched_main(void *param)
                sched_job = drm_sched_entity_pop_job(entity);
 
                if (!sched_job) {
-                       complete(&entity->entity_idle);
+                       complete_all(&entity->entity_idle);
                        continue;
                }
 
@@ -998,7 +998,7 @@ static int drm_sched_main(void *param)
 
                trace_drm_run_job(sched_job, entity);
                fence = sched->ops->run_job(sched_job);
-               complete(&entity->entity_idle);
+               complete_all(&entity->entity_idle);
                drm_sched_fence_scheduled(s_fence);
 
                if (!IS_ERR_OR_NULL(fence)) {
index b29ef1085cad9265df3dedccda1748abc0c9e467..f896ef85c2f2b2503a72c5a0d56e9c22aac2c1cb 100644 (file)
@@ -12,3 +12,5 @@ obj-$(CONFIG_DRM_KUNIT_TEST) += \
        drm_mm_test.o \
        drm_plane_helper_test.o \
        drm_rect_test.o
+
+CFLAGS_drm_mm_test.o := $(DISABLE_STRUCTLEAK_PLUGIN)
index 89f12d3b4a2199d29d0065ac25014d78aa0bfacf..186b28dc70380f1477d5eabb49ece2e9ed37e848 100644 (file)
@@ -298,9 +298,9 @@ static bool expect_reserve_fail(struct kunit *test, struct drm_mm *mm, struct dr
        return false;
 }
 
-static bool check_reserve_boundaries(struct kunit *test, struct drm_mm *mm,
-                                    unsigned int count,
-                                    u64 size)
+static bool noinline_for_stack check_reserve_boundaries(struct kunit *test, struct drm_mm *mm,
+                                                       unsigned int count,
+                                                       u64 size)
 {
        const struct boundary {
                u64 start, size;
index ba3aa0a0fc43cbb5e0feb01e36c4eca75fc18f63..da5493f789dfe5fddacedc55219df10fd73c0779 100644 (file)
@@ -173,7 +173,7 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
 
        clear = src_iter->ops->maps_tt && (!ttm || !ttm_tt_is_populated(ttm));
        if (!(clear && ttm && !(ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC)))
-               ttm_move_memcpy(clear, ttm->num_pages, dst_iter, src_iter);
+               ttm_move_memcpy(clear, PFN_UP(dst_mem->size), dst_iter, src_iter);
 
        if (!src_iter->ops->maps_tt)
                ttm_kmap_iter_linear_io_fini(&_src_iter.io, bdev, src_mem);
index 43d9b3a6a352c1c451737ca06e84230176935322..c5947ed8cc812b65a63f2cf52078bee3a72ceee8 100644 (file)
@@ -179,6 +179,7 @@ static void vc4_bo_destroy(struct vc4_bo *bo)
                bo->validated_shader = NULL;
        }
 
+       mutex_destroy(&bo->madv_lock);
        drm_gem_dma_free(&bo->base);
 }
 
@@ -394,7 +395,6 @@ struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size)
 {
        struct vc4_dev *vc4 = to_vc4_dev(dev);
        struct vc4_bo *bo;
-       int ret;
 
        if (WARN_ON_ONCE(vc4->is_vc5))
                return ERR_PTR(-ENODEV);
@@ -406,9 +406,7 @@ struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size)
        bo->madv = VC4_MADV_WILLNEED;
        refcount_set(&bo->usecnt, 0);
 
-       ret = drmm_mutex_init(dev, &bo->madv_lock);
-       if (ret)
-               return ERR_PTR(ret);
+       mutex_init(&bo->madv_lock);
 
        mutex_lock(&vc4->bo_lock);
        bo->label = VC4_BO_TYPE_KERNEL;
index 5d05093014ac3c367133bc53a78182c080a227a9..9f4a90493aeace583d0034e10e84752c90ff24ec 100644 (file)
@@ -358,10 +358,18 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
                drm_gem_object_release(obj);
                return ret;
        }
-       drm_gem_object_put(obj);
 
        rc->res_handle = qobj->hw_res_handle; /* similiar to a VM address */
        rc->bo_handle = handle;
+
+       /*
+        * The handle owns the reference now.  But we must drop our
+        * remaining reference *after* we no longer need to dereference
+        * the obj.  Otherwise userspace could guess the handle and
+        * race closing it from another thread.
+        */
+       drm_gem_object_put(obj);
+
        return 0;
 }
 
@@ -723,11 +731,18 @@ static int virtio_gpu_resource_create_blob_ioctl(struct drm_device *dev,
                drm_gem_object_release(obj);
                return ret;
        }
-       drm_gem_object_put(obj);
 
        rc_blob->res_handle = bo->hw_res_handle;
        rc_blob->bo_handle = handle;
 
+       /*
+        * The handle owns the reference now.  But we must drop our
+        * remaining reference *after* we no longer need to dereference
+        * the obj.  Otherwise userspace could guess the handle and
+        * race closing it from another thread.
+        */
+       drm_gem_object_put(obj);
+
        return 0;
 }
 
index 8d7728181de0167d2fc5df344af4e0f154f9aec8..c7e74cf130221bbed3aa447e416065b03bf3e2b4 100644 (file)
@@ -184,7 +184,7 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
        struct virtio_gpu_object_array *objs = NULL;
        struct drm_gem_shmem_object *shmem_obj;
        struct virtio_gpu_object *bo;
-       struct virtio_gpu_mem_entry *ents;
+       struct virtio_gpu_mem_entry *ents = NULL;
        unsigned int nents;
        int ret;
 
@@ -210,7 +210,7 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
                ret = -ENOMEM;
                objs = virtio_gpu_array_alloc(1);
                if (!objs)
-                       goto err_put_id;
+                       goto err_free_entry;
                virtio_gpu_array_add_obj(objs, &bo->base.base);
 
                ret = virtio_gpu_array_lock_resv(objs);
@@ -239,6 +239,8 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
 
 err_put_objs:
        virtio_gpu_array_put_free(objs);
+err_free_entry:
+       kvfree(ents);
 err_put_id:
        virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
 err_free_gem:
index 932b125ebf3d6476c303abae91efb0b346a76f95..ddf8373c1d779c5610689f0b430a9632150fbfd6 100644 (file)
@@ -254,40 +254,6 @@ void ttm_base_object_unref(struct ttm_base_object **p_base)
        kref_put(&base->refcount, ttm_release_base);
 }
 
-/**
- * ttm_base_object_noref_lookup - look up a base object without reference
- * @tfile: The struct ttm_object_file the object is registered with.
- * @key: The object handle.
- *
- * This function looks up a ttm base object and returns a pointer to it
- * without refcounting the pointer. The returned pointer is only valid
- * until ttm_base_object_noref_release() is called, and the object
- * pointed to by the returned pointer may be doomed. Any persistent usage
- * of the object requires a refcount to be taken using kref_get_unless_zero().
- * Iff this function returns successfully it needs to be paired with
- * ttm_base_object_noref_release() and no sleeping- or scheduling functions
- * may be called inbetween these function callse.
- *
- * Return: A pointer to the object if successful or NULL otherwise.
- */
-struct ttm_base_object *
-ttm_base_object_noref_lookup(struct ttm_object_file *tfile, uint64_t key)
-{
-       struct vmwgfx_hash_item *hash;
-       int ret;
-
-       rcu_read_lock();
-       ret = ttm_tfile_find_ref_rcu(tfile, key, &hash);
-       if (ret) {
-               rcu_read_unlock();
-               return NULL;
-       }
-
-       __release(RCU);
-       return hlist_entry(hash, struct ttm_ref_object, hash)->obj;
-}
-EXPORT_SYMBOL(ttm_base_object_noref_lookup);
-
 struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
                                               uint64_t key)
 {
@@ -295,15 +261,16 @@ struct ttm_base_object *ttm_base_object_lookup(struct ttm_object_file *tfile,
        struct vmwgfx_hash_item *hash;
        int ret;
 
-       rcu_read_lock();
-       ret = ttm_tfile_find_ref_rcu(tfile, key, &hash);
+       spin_lock(&tfile->lock);
+       ret = ttm_tfile_find_ref(tfile, key, &hash);
 
        if (likely(ret == 0)) {
                base = hlist_entry(hash, struct ttm_ref_object, hash)->obj;
                if (!kref_get_unless_zero(&base->refcount))
                        base = NULL;
        }
-       rcu_read_unlock();
+       spin_unlock(&tfile->lock);
+
 
        return base;
 }
index f0ebbe340ad698307faf369dd690ea74e121c502..8098a3846bae3e4f1c1626230dcc9d68141c56ae 100644 (file)
@@ -307,18 +307,4 @@ extern int ttm_prime_handle_to_fd(struct ttm_object_file *tfile,
 #define ttm_prime_object_kfree(__obj, __prime)         \
        kfree_rcu(__obj, __prime.base.rhead)
 
-struct ttm_base_object *
-ttm_base_object_noref_lookup(struct ttm_object_file *tfile, uint64_t key);
-
-/**
- * ttm_base_object_noref_release - release a base object pointer looked up
- * without reference
- *
- * Releases a base object pointer looked up with ttm_base_object_noref_lookup().
- */
-static inline void ttm_base_object_noref_release(void)
-{
-       __acquire(RCU);
-       rcu_read_unlock();
-}
 #endif
index 321c551784a14683f54090307c75783ad8cb30c0..aa1cd5126a321dd8f72e104304c537b75b864fb5 100644 (file)
@@ -715,44 +715,6 @@ int vmw_user_bo_lookup(struct drm_file *filp,
        return 0;
 }
 
-/**
- * vmw_user_bo_noref_lookup - Look up a vmw user buffer object without reference
- * @filp: The TTM object file the handle is registered with.
- * @handle: The user buffer object handle.
- *
- * This function looks up a struct vmw_bo and returns a pointer to the
- * struct vmw_buffer_object it derives from without refcounting the pointer.
- * The returned pointer is only valid until vmw_user_bo_noref_release() is
- * called, and the object pointed to by the returned pointer may be doomed.
- * Any persistent usage of the object requires a refcount to be taken using
- * ttm_bo_reference_unless_doomed(). Iff this function returns successfully it
- * needs to be paired with vmw_user_bo_noref_release() and no sleeping-
- * or scheduling functions may be called in between these function calls.
- *
- * Return: A struct vmw_buffer_object pointer if successful or negative
- * error pointer on failure.
- */
-struct vmw_buffer_object *
-vmw_user_bo_noref_lookup(struct drm_file *filp, u32 handle)
-{
-       struct vmw_buffer_object *vmw_bo;
-       struct ttm_buffer_object *bo;
-       struct drm_gem_object *gobj = drm_gem_object_lookup(filp, handle);
-
-       if (!gobj) {
-               DRM_ERROR("Invalid buffer object handle 0x%08lx.\n",
-                         (unsigned long)handle);
-               return ERR_PTR(-ESRCH);
-       }
-       vmw_bo = gem_to_vmw_bo(gobj);
-       bo = ttm_bo_get_unless_zero(&vmw_bo->base);
-       vmw_bo = vmw_buffer_object(bo);
-       drm_gem_object_put(gobj);
-
-       return vmw_bo;
-}
-
-
 /**
  * vmw_bo_fence_single - Utility function to fence a single TTM buffer
  *                       object without unreserving it.
index b062b020b3782490a79b86ead40d8481e27fbc72..5acbf5849b2703eedfbc88dcf34efb5467a6e330 100644 (file)
@@ -830,12 +830,7 @@ extern int vmw_user_resource_lookup_handle(
        uint32_t handle,
        const struct vmw_user_resource_conv *converter,
        struct vmw_resource **p_res);
-extern struct vmw_resource *
-vmw_user_resource_noref_lookup_handle(struct vmw_private *dev_priv,
-                                     struct ttm_object_file *tfile,
-                                     uint32_t handle,
-                                     const struct vmw_user_resource_conv *
-                                     converter);
+
 extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
                                  struct drm_file *file_priv);
 extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
@@ -874,15 +869,6 @@ static inline bool vmw_resource_mob_attached(const struct vmw_resource *res)
        return !RB_EMPTY_NODE(&res->mob_node);
 }
 
-/**
- * vmw_user_resource_noref_release - release a user resource pointer looked up
- * without reference
- */
-static inline void vmw_user_resource_noref_release(void)
-{
-       ttm_base_object_noref_release();
-}
-
 /**
  * Buffer object helper functions - vmwgfx_bo.c
  */
@@ -934,8 +920,6 @@ extern void vmw_bo_unmap(struct vmw_buffer_object *vbo);
 extern void vmw_bo_move_notify(struct ttm_buffer_object *bo,
                               struct ttm_resource *mem);
 extern void vmw_bo_swap_notify(struct ttm_buffer_object *bo);
-extern struct vmw_buffer_object *
-vmw_user_bo_noref_lookup(struct drm_file *filp, u32 handle);
 
 /**
  * vmw_bo_adjust_prio - Adjust the buffer object eviction priority
index a5379f6fb5ab18cc940bb4f4c4a1a08f52f44f80..a44d53e33cdb14346545fe61e99f076004201b6b 100644 (file)
@@ -290,20 +290,26 @@ static void vmw_execbuf_rcache_update(struct vmw_res_cache_entry *rcache,
        rcache->valid_handle = 0;
 }
 
+enum vmw_val_add_flags {
+       vmw_val_add_flag_none  =      0,
+       vmw_val_add_flag_noctx = 1 << 0,
+};
+
 /**
- * vmw_execbuf_res_noref_val_add - Add a resource described by an unreferenced
- * rcu-protected pointer to the validation list.
+ * vmw_execbuf_res_val_add - Add a resource to the validation list.
  *
  * @sw_context: Pointer to the software context.
  * @res: Unreferenced rcu-protected pointer to the resource.
  * @dirty: Whether to change dirty status.
+ * @flags: specifies whether to use the context or not
  *
  * Returns: 0 on success. Negative error code on failure. Typical error codes
  * are %-EINVAL on inconsistency and %-ESRCH if the resource was doomed.
  */
-static int vmw_execbuf_res_noref_val_add(struct vmw_sw_context *sw_context,
-                                        struct vmw_resource *res,
-                                        u32 dirty)
+static int vmw_execbuf_res_val_add(struct vmw_sw_context *sw_context,
+                                  struct vmw_resource *res,
+                                  u32 dirty,
+                                  u32 flags)
 {
        struct vmw_private *dev_priv = res->dev_priv;
        int ret;
@@ -318,24 +324,30 @@ static int vmw_execbuf_res_noref_val_add(struct vmw_sw_context *sw_context,
                if (dirty)
                        vmw_validation_res_set_dirty(sw_context->ctx,
                                                     rcache->private, dirty);
-               vmw_user_resource_noref_release();
                return 0;
        }
 
-       priv_size = vmw_execbuf_res_size(dev_priv, res_type);
-       ret = vmw_validation_add_resource(sw_context->ctx, res, priv_size,
-                                         dirty, (void **)&ctx_info,
-                                         &first_usage);
-       vmw_user_resource_noref_release();
-       if (ret)
-               return ret;
+       if ((flags & vmw_val_add_flag_noctx) != 0) {
+               ret = vmw_validation_add_resource(sw_context->ctx, res, 0, dirty,
+                                                 (void **)&ctx_info, NULL);
+               if (ret)
+                       return ret;
 
-       if (priv_size && first_usage) {
-               ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, res,
-                                             ctx_info);
-               if (ret) {
-                       VMW_DEBUG_USER("Failed first usage context setup.\n");
+       } else {
+               priv_size = vmw_execbuf_res_size(dev_priv, res_type);
+               ret = vmw_validation_add_resource(sw_context->ctx, res, priv_size,
+                                                 dirty, (void **)&ctx_info,
+                                                 &first_usage);
+               if (ret)
                        return ret;
+
+               if (priv_size && first_usage) {
+                       ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, res,
+                                                     ctx_info);
+                       if (ret) {
+                               VMW_DEBUG_USER("Failed first usage context setup.\n");
+                               return ret;
+                       }
                }
        }
 
@@ -343,43 +355,6 @@ static int vmw_execbuf_res_noref_val_add(struct vmw_sw_context *sw_context,
        return 0;
 }
 
-/**
- * vmw_execbuf_res_noctx_val_add - Add a non-context resource to the resource
- * validation list if it's not already on it
- *
- * @sw_context: Pointer to the software context.
- * @res: Pointer to the resource.
- * @dirty: Whether to change dirty status.
- *
- * Returns: Zero on success. Negative error code on failure.
- */
-static int vmw_execbuf_res_noctx_val_add(struct vmw_sw_context *sw_context,
-                                        struct vmw_resource *res,
-                                        u32 dirty)
-{
-       struct vmw_res_cache_entry *rcache;
-       enum vmw_res_type res_type = vmw_res_type(res);
-       void *ptr;
-       int ret;
-
-       rcache = &sw_context->res_cache[res_type];
-       if (likely(rcache->valid && rcache->res == res)) {
-               if (dirty)
-                       vmw_validation_res_set_dirty(sw_context->ctx,
-                                                    rcache->private, dirty);
-               return 0;
-       }
-
-       ret = vmw_validation_add_resource(sw_context->ctx, res, 0, dirty,
-                                         &ptr, NULL);
-       if (ret)
-               return ret;
-
-       vmw_execbuf_rcache_update(rcache, res, ptr);
-
-       return 0;
-}
-
 /**
  * vmw_view_res_val_add - Add a view and the surface it's pointing to to the
  * validation list
@@ -398,13 +373,13 @@ static int vmw_view_res_val_add(struct vmw_sw_context *sw_context,
         * First add the resource the view is pointing to, otherwise it may be
         * swapped out when the view is validated.
         */
-       ret = vmw_execbuf_res_noctx_val_add(sw_context, vmw_view_srf(view),
-                                           vmw_view_dirtying(view));
+       ret = vmw_execbuf_res_val_add(sw_context, vmw_view_srf(view),
+                                     vmw_view_dirtying(view), vmw_val_add_flag_noctx);
        if (ret)
                return ret;
 
-       return vmw_execbuf_res_noctx_val_add(sw_context, view,
-                                            VMW_RES_DIRTY_NONE);
+       return vmw_execbuf_res_val_add(sw_context, view, VMW_RES_DIRTY_NONE,
+                                      vmw_val_add_flag_noctx);
 }
 
 /**
@@ -475,8 +450,9 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
                        if (IS_ERR(res))
                                continue;
 
-                       ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
-                                                           VMW_RES_DIRTY_SET);
+                       ret = vmw_execbuf_res_val_add(sw_context, res,
+                                                     VMW_RES_DIRTY_SET,
+                                                     vmw_val_add_flag_noctx);
                        if (unlikely(ret != 0))
                                return ret;
                }
@@ -490,9 +466,9 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
                if (vmw_res_type(entry->res) == vmw_res_view)
                        ret = vmw_view_res_val_add(sw_context, entry->res);
                else
-                       ret = vmw_execbuf_res_noctx_val_add
-                               (sw_context, entry->res,
-                                vmw_binding_dirtying(entry->bt));
+                       ret = vmw_execbuf_res_val_add(sw_context, entry->res,
+                                                     vmw_binding_dirtying(entry->bt),
+                                                     vmw_val_add_flag_noctx);
                if (unlikely(ret != 0))
                        break;
        }
@@ -658,7 +634,8 @@ vmw_cmd_res_check(struct vmw_private *dev_priv,
 {
        struct vmw_res_cache_entry *rcache = &sw_context->res_cache[res_type];
        struct vmw_resource *res;
-       int ret;
+       int ret = 0;
+       bool needs_unref = false;
 
        if (p_res)
                *p_res = NULL;
@@ -683,17 +660,18 @@ vmw_cmd_res_check(struct vmw_private *dev_priv,
                if (ret)
                        return ret;
 
-               res = vmw_user_resource_noref_lookup_handle
-                       (dev_priv, sw_context->fp->tfile, *id_loc, converter);
-               if (IS_ERR(res)) {
+               ret = vmw_user_resource_lookup_handle
+                       (dev_priv, sw_context->fp->tfile, *id_loc, converter, &res);
+               if (ret != 0) {
                        VMW_DEBUG_USER("Could not find/use resource 0x%08x.\n",
                                       (unsigned int) *id_loc);
-                       return PTR_ERR(res);
+                       return ret;
                }
+               needs_unref = true;
 
-               ret = vmw_execbuf_res_noref_val_add(sw_context, res, dirty);
+               ret = vmw_execbuf_res_val_add(sw_context, res, dirty, vmw_val_add_flag_none);
                if (unlikely(ret != 0))
-                       return ret;
+                       goto res_check_done;
 
                if (rcache->valid && rcache->res == res) {
                        rcache->valid_handle = true;
@@ -708,7 +686,11 @@ vmw_cmd_res_check(struct vmw_private *dev_priv,
        if (p_res)
                *p_res = res;
 
-       return 0;
+res_check_done:
+       if (needs_unref)
+               vmw_resource_unreference(&res);
+
+       return ret;
 }
 
 /**
@@ -1171,9 +1153,9 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
        int ret;
 
        vmw_validation_preload_bo(sw_context->ctx);
-       vmw_bo = vmw_user_bo_noref_lookup(sw_context->filp, handle);
-       if (IS_ERR(vmw_bo)) {
-               VMW_DEBUG_USER("Could not find or use MOB buffer.\n");
+       ret = vmw_user_bo_lookup(sw_context->filp, handle, &vmw_bo);
+       if (ret != 0) {
+               drm_dbg(&dev_priv->drm, "Could not find or use MOB buffer.\n");
                return PTR_ERR(vmw_bo);
        }
        ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, true, false);
@@ -1225,9 +1207,9 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
        int ret;
 
        vmw_validation_preload_bo(sw_context->ctx);
-       vmw_bo = vmw_user_bo_noref_lookup(sw_context->filp, handle);
-       if (IS_ERR(vmw_bo)) {
-               VMW_DEBUG_USER("Could not find or use GMR region.\n");
+       ret = vmw_user_bo_lookup(sw_context->filp, handle, &vmw_bo);
+       if (ret != 0) {
+               drm_dbg(&dev_priv->drm, "Could not find or use GMR region.\n");
                return PTR_ERR(vmw_bo);
        }
        ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo, false, false);
@@ -2025,8 +2007,9 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
                res = vmw_shader_lookup(vmw_context_res_man(ctx),
                                        cmd->body.shid, cmd->body.type);
                if (!IS_ERR(res)) {
-                       ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
-                                                           VMW_RES_DIRTY_NONE);
+                       ret = vmw_execbuf_res_val_add(sw_context, res,
+                                                     VMW_RES_DIRTY_NONE,
+                                                     vmw_val_add_flag_noctx);
                        if (unlikely(ret != 0))
                                return ret;
 
@@ -2273,8 +2256,9 @@ static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
                        return PTR_ERR(res);
                }
 
-               ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
-                                                   VMW_RES_DIRTY_NONE);
+               ret = vmw_execbuf_res_val_add(sw_context, res,
+                                             VMW_RES_DIRTY_NONE,
+                                             vmw_val_add_flag_noctx);
                if (ret)
                        return ret;
        }
@@ -2777,8 +2761,8 @@ static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv,
                return PTR_ERR(res);
        }
 
-       ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
-                                           VMW_RES_DIRTY_NONE);
+       ret = vmw_execbuf_res_val_add(sw_context, res, VMW_RES_DIRTY_NONE,
+                                     vmw_val_add_flag_noctx);
        if (ret) {
                VMW_DEBUG_USER("Error creating resource validation node.\n");
                return ret;
@@ -3098,8 +3082,8 @@ static int vmw_cmd_dx_bind_streamoutput(struct vmw_private *dev_priv,
 
        vmw_dx_streamoutput_set_size(res, cmd->body.sizeInBytes);
 
-       ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
-                                           VMW_RES_DIRTY_NONE);
+       ret = vmw_execbuf_res_val_add(sw_context, res, VMW_RES_DIRTY_NONE,
+                                     vmw_val_add_flag_noctx);
        if (ret) {
                DRM_ERROR("Error creating resource validation node.\n");
                return ret;
@@ -3148,8 +3132,8 @@ static int vmw_cmd_dx_set_streamoutput(struct vmw_private *dev_priv,
                return 0;
        }
 
-       ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
-                                           VMW_RES_DIRTY_NONE);
+       ret = vmw_execbuf_res_val_add(sw_context, res, VMW_RES_DIRTY_NONE,
+                                     vmw_val_add_flag_noctx);
        if (ret) {
                DRM_ERROR("Error creating resource validation node.\n");
                return ret;
@@ -4066,22 +4050,26 @@ static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
        if (ret)
                return ret;
 
-       res = vmw_user_resource_noref_lookup_handle
+       ret = vmw_user_resource_lookup_handle
                (dev_priv, sw_context->fp->tfile, handle,
-                user_context_converter);
-       if (IS_ERR(res)) {
+                user_context_converter, &res);
+       if (ret != 0) {
                VMW_DEBUG_USER("Could not find or user DX context 0x%08x.\n",
                               (unsigned int) handle);
-               return PTR_ERR(res);
+               return ret;
        }
 
-       ret = vmw_execbuf_res_noref_val_add(sw_context, res, VMW_RES_DIRTY_SET);
-       if (unlikely(ret != 0))
+       ret = vmw_execbuf_res_val_add(sw_context, res, VMW_RES_DIRTY_SET,
+                                     vmw_val_add_flag_none);
+       if (unlikely(ret != 0)) {
+               vmw_resource_unreference(&res);
                return ret;
+       }
 
        sw_context->dx_ctx_node = vmw_execbuf_info_from_res(sw_context, res);
        sw_context->man = vmw_context_res_man(res);
 
+       vmw_resource_unreference(&res);
        return 0;
 }
 
index f66caa540e1460534b2d417d0dbab267577afffe..c7d645e5ec7bf84e04172d55577fbef7f767498b 100644 (file)
@@ -281,39 +281,6 @@ out_bad_resource:
        return ret;
 }
 
-/**
- * vmw_user_resource_noref_lookup_handle - lookup a struct resource from a
- * TTM user-space handle and perform basic type checks
- *
- * @dev_priv:     Pointer to a device private struct
- * @tfile:        Pointer to a struct ttm_object_file identifying the caller
- * @handle:       The TTM user-space handle
- * @converter:    Pointer to an object describing the resource type
- *
- * If the handle can't be found or is associated with an incorrect resource
- * type, -EINVAL will be returned.
- */
-struct vmw_resource *
-vmw_user_resource_noref_lookup_handle(struct vmw_private *dev_priv,
-                                     struct ttm_object_file *tfile,
-                                     uint32_t handle,
-                                     const struct vmw_user_resource_conv
-                                     *converter)
-{
-       struct ttm_base_object *base;
-
-       base = ttm_base_object_noref_lookup(tfile, handle);
-       if (!base)
-               return ERR_PTR(-ESRCH);
-
-       if (unlikely(ttm_base_object_type(base) != converter->object_type)) {
-               ttm_base_object_noref_release();
-               return ERR_PTR(-EINVAL);
-       }
-
-       return converter->base_obj_to_res(base);
-}
-
 /*
  * Helper function that looks either a surface or bo.
  *
index 0d8e6bd1ccbf207d0dd2e276ae7d19adf704d006..90996c108146dbf2e12bb945af1099bc710c288b 100644 (file)
@@ -717,7 +717,7 @@ static int xen_drv_probe(struct xenbus_device *xb_dev,
        return xenbus_switch_state(xb_dev, XenbusStateInitialising);
 }
 
-static int xen_drv_remove(struct xenbus_device *dev)
+static void xen_drv_remove(struct xenbus_device *dev)
 {
        struct xen_drm_front_info *front_info = dev_get_drvdata(&dev->dev);
        int to = 100;
@@ -751,7 +751,6 @@ static int xen_drv_remove(struct xenbus_device *dev)
 
        xen_drm_drv_fini(front_info);
        xenbus_frontend_closed(dev);
-       return 0;
 }
 
 static const struct xenbus_device_id xen_driver_ids[] = {
index ab125f79408f253c639eda94783a83994e630e41..1fb0f7105fb21ba3b68f0034b45b2db90fa6d37b 100644 (file)
@@ -282,7 +282,7 @@ int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata)
                }
                rc = mp2_ops->get_rep_desc(cl_idx, cl_data->report_descr[i]);
                if (rc)
-                       return rc;
+                       goto cleanup;
                mp2_ops->start(privdata, info);
                status = amd_sfh_wait_for_response
                                (privdata, cl_data->sensor_idx[i], SENSOR_ENABLED);
index 4da2f9f62aba3828d31a5ed8135943178b7e59d1..a1d6e08fab7d4330334839bb83e7f45394612e9e 100644 (file)
@@ -160,7 +160,7 @@ static int amd_sfh1_1_hid_client_init(struct amd_mp2_dev *privdata)
                }
                rc = mp2_ops->get_rep_desc(cl_idx, cl_data->report_descr[i]);
                if (rc)
-                       return rc;
+                       goto cleanup;
 
                writel(0, privdata->mmio + AMD_P2C_MSG(0));
                mp2_ops->start(privdata, info);
index 467d789f9bc2d3a0d846c661b618d1b640a63188..25ed7b9a917e4f2e1b1828caf039d691b0a07fca 100644 (file)
@@ -60,7 +60,6 @@ static int betopff_init(struct hid_device *hid)
        struct list_head *report_list =
                        &hid->report_enum[HID_OUTPUT_REPORT].report_list;
        struct input_dev *dev;
-       int field_count = 0;
        int error;
        int i, j;
 
@@ -86,19 +85,21 @@ static int betopff_init(struct hid_device *hid)
         * -----------------------------------------
         * Do init them with default value.
         */
+       if (report->maxfield < 4) {
+               hid_err(hid, "not enough fields in the report: %d\n",
+                               report->maxfield);
+               return -ENODEV;
+       }
        for (i = 0; i < report->maxfield; i++) {
+               if (report->field[i]->report_count < 1) {
+                       hid_err(hid, "no values in the field\n");
+                       return -ENODEV;
+               }
                for (j = 0; j < report->field[i]->report_count; j++) {
                        report->field[i]->value[j] = 0x00;
-                       field_count++;
                }
        }
 
-       if (field_count < 4) {
-               hid_err(hid, "not enough fields in the report: %d\n",
-                               field_count);
-               return -ENODEV;
-       }
-
        betopff = kzalloc(sizeof(*betopff), GFP_KERNEL);
        if (!betopff)
                return -ENOMEM;
index e8c5e3ac9fff1596b4962697cbf42be116c9d600..e8b16665860d6b04609d814f89bc048ec45274c8 100644 (file)
@@ -344,6 +344,11 @@ static int bigben_probe(struct hid_device *hid,
        }
 
        report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
+       if (list_empty(report_list)) {
+               hid_err(hid, "no output report found\n");
+               error = -ENODEV;
+               goto error_hw_stop;
+       }
        bigben->report = list_entry(report_list->next,
                struct hid_report, list);
 
index bd47628da6be0da0c9fa228727007f4c8a99b607..3e1803592bd4a224facc6111634fd7e34226ada1 100644 (file)
@@ -993,8 +993,8 @@ struct hid_report *hid_validate_values(struct hid_device *hid,
                 * Validating on id 0 means we should examine the first
                 * report in the list.
                 */
-               report = list_entry(
-                               hid->report_enum[type].report_list.next,
+               report = list_first_entry_or_null(
+                               &hid->report_enum[type].report_list,
                                struct hid_report, list);
        } else {
                report = hid->report_enum[type].report_id_hash[id];
index 82713ef3aaa64c5c310700c615755c5dab3e4706..0f8c11842a3a588ab0c9ee87ec37720827b86d50 100644 (file)
 #define USB_DEVICE_ID_CH_AXIS_295      0x001c
 
 #define USB_VENDOR_ID_CHERRY           0x046a
-#define USB_DEVICE_ID_CHERRY_MOUSE_000C        0x000c
 #define USB_DEVICE_ID_CHERRY_CYMOTION  0x0023
 #define USB_DEVICE_ID_CHERRY_CYMOTION_SOLAR    0x0027
 
 #define USB_DEVICE_ID_UGEE_XPPEN_TABLET_G540   0x0075
 #define USB_DEVICE_ID_UGEE_XPPEN_TABLET_G640   0x0094
 #define USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO01 0x0042
+#define USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO01_V2      0x0905
 #define USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO_L 0x0935
 #define USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO_PRO_S     0x0909
 #define USB_DEVICE_ID_UGEE_XPPEN_TABLET_STAR06 0x0078
index f399bf0d3c8cc33d117d0b060960e611619acc3c..27c40894acab40252da8f917b27902791ec6d176 100644 (file)
@@ -944,6 +944,7 @@ ATTRIBUTE_GROUPS(ps_device);
 
 static int dualsense_get_calibration_data(struct dualsense *ds)
 {
+       struct hid_device *hdev = ds->base.hdev;
        short gyro_pitch_bias, gyro_pitch_plus, gyro_pitch_minus;
        short gyro_yaw_bias, gyro_yaw_plus, gyro_yaw_minus;
        short gyro_roll_bias, gyro_roll_plus, gyro_roll_minus;
@@ -954,6 +955,7 @@ static int dualsense_get_calibration_data(struct dualsense *ds)
        int speed_2x;
        int range_2g;
        int ret = 0;
+       int i;
        uint8_t *buf;
 
        buf = kzalloc(DS_FEATURE_REPORT_CALIBRATION_SIZE, GFP_KERNEL);
@@ -1005,6 +1007,21 @@ static int dualsense_get_calibration_data(struct dualsense *ds)
        ds->gyro_calib_data[2].sens_numer = speed_2x*DS_GYRO_RES_PER_DEG_S;
        ds->gyro_calib_data[2].sens_denom = gyro_roll_plus - gyro_roll_minus;
 
+       /*
+        * Sanity check gyro calibration data. This is needed to prevent crashes
+        * during report handling of virtual, clone or broken devices not implementing
+        * calibration data properly.
+        */
+       for (i = 0; i < ARRAY_SIZE(ds->gyro_calib_data); i++) {
+               if (ds->gyro_calib_data[i].sens_denom == 0) {
+                       hid_warn(hdev, "Invalid gyro calibration data for axis (%d), disabling calibration.",
+                                       ds->gyro_calib_data[i].abs_code);
+                       ds->gyro_calib_data[i].bias = 0;
+                       ds->gyro_calib_data[i].sens_numer = DS_GYRO_RANGE;
+                       ds->gyro_calib_data[i].sens_denom = S16_MAX;
+               }
+       }
+
        /*
         * Set accelerometer calibration and normalization parameters.
         * Data values will be normalized to 1/DS_ACC_RES_PER_G g.
@@ -1027,6 +1044,21 @@ static int dualsense_get_calibration_data(struct dualsense *ds)
        ds->accel_calib_data[2].sens_numer = 2*DS_ACC_RES_PER_G;
        ds->accel_calib_data[2].sens_denom = range_2g;
 
+       /*
+        * Sanity check accelerometer calibration data. This is needed to prevent crashes
+        * during report handling of virtual, clone or broken devices not implementing calibration
+        * data properly.
+        */
+       for (i = 0; i < ARRAY_SIZE(ds->accel_calib_data); i++) {
+               if (ds->accel_calib_data[i].sens_denom == 0) {
+                       hid_warn(hdev, "Invalid accelerometer calibration data for axis (%d), disabling calibration.",
+                                       ds->accel_calib_data[i].abs_code);
+                       ds->accel_calib_data[i].bias = 0;
+                       ds->accel_calib_data[i].sens_numer = DS_ACC_RANGE;
+                       ds->accel_calib_data[i].sens_denom = S16_MAX;
+               }
+       }
+
 err_free:
        kfree(buf);
        return ret;
@@ -1737,6 +1769,7 @@ static int dualshock4_get_calibration_data(struct dualshock4 *ds4)
        int speed_2x;
        int range_2g;
        int ret = 0;
+       int i;
        uint8_t *buf;
 
        if (ds4->base.hdev->bus == BUS_USB) {
@@ -1830,6 +1863,21 @@ static int dualshock4_get_calibration_data(struct dualshock4 *ds4)
        ds4->gyro_calib_data[2].sens_numer = speed_2x*DS4_GYRO_RES_PER_DEG_S;
        ds4->gyro_calib_data[2].sens_denom = gyro_roll_plus - gyro_roll_minus;
 
+       /*
+        * Sanity check gyro calibration data. This is needed to prevent crashes
+        * during report handling of virtual, clone or broken devices not implementing
+        * calibration data properly.
+        */
+       for (i = 0; i < ARRAY_SIZE(ds4->gyro_calib_data); i++) {
+               if (ds4->gyro_calib_data[i].sens_denom == 0) {
+                       hid_warn(hdev, "Invalid gyro calibration data for axis (%d), disabling calibration.",
+                                       ds4->gyro_calib_data[i].abs_code);
+                       ds4->gyro_calib_data[i].bias = 0;
+                       ds4->gyro_calib_data[i].sens_numer = DS4_GYRO_RANGE;
+                       ds4->gyro_calib_data[i].sens_denom = S16_MAX;
+               }
+       }
+
        /*
         * Set accelerometer calibration and normalization parameters.
         * Data values will be normalized to 1/DS4_ACC_RES_PER_G g.
@@ -1852,6 +1900,21 @@ static int dualshock4_get_calibration_data(struct dualshock4 *ds4)
        ds4->accel_calib_data[2].sens_numer = 2*DS4_ACC_RES_PER_G;
        ds4->accel_calib_data[2].sens_denom = range_2g;
 
+       /*
+        * Sanity check accelerometer calibration data. This is needed to prevent crashes
+        * during report handling of virtual, clone or broken devices not implementing calibration
+        * data properly.
+        */
+       for (i = 0; i < ARRAY_SIZE(ds4->accel_calib_data); i++) {
+               if (ds4->accel_calib_data[i].sens_denom == 0) {
+                       hid_warn(hdev, "Invalid accelerometer calibration data for axis (%d), disabling calibration.",
+                                       ds4->accel_calib_data[i].abs_code);
+                       ds4->accel_calib_data[i].bias = 0;
+                       ds4->accel_calib_data[i].sens_numer = DS4_ACC_RANGE;
+                       ds4->accel_calib_data[i].sens_denom = S16_MAX;
+               }
+       }
+
 err_free:
        kfree(buf);
        return ret;
index 0e9702c7f7d6c09613107fe46aba779e86de02dc..be3ad02573de816cde758cf2fd82f0d864386885 100644 (file)
@@ -54,7 +54,6 @@ static const struct hid_device_id hid_quirks[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_FLIGHT_SIM_YOKE), HID_QUIRK_NOGET },
        { HID_USB_DEVICE(USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_PRO_PEDALS), HID_QUIRK_NOGET },
        { HID_USB_DEVICE(USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_PRO_THROTTLE), HID_QUIRK_NOGET },
-       { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_MOUSE_000C), HID_QUIRK_ALWAYS_POLL },
        { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB), HID_QUIRK_NO_INIT_REPORTS },
        { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K65RGB_RAPIDFIRE), HID_QUIRK_NO_INIT_REPORTS | HID_QUIRK_ALWAYS_POLL },
        { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K70RGB), HID_QUIRK_NO_INIT_REPORTS },
index 7fa6fe04f1b262656186ccd7a4c9f5fcb52679c8..cfbbc39807a69f80bfe100ba04c96c297418220a 100644 (file)
@@ -525,6 +525,8 @@ static const struct hid_device_id uclogic_devices[] = {
                                USB_DEVICE_ID_UGEE_XPPEN_TABLET_G640) },
        { HID_USB_DEVICE(USB_VENDOR_ID_UGEE,
                                USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO01) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_UGEE,
+                               USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO01_V2) },
        { HID_USB_DEVICE(USB_VENDOR_ID_UGEE,
                                USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO_L) },
        { HID_USB_DEVICE(USB_VENDOR_ID_UGEE,
index cd1233d7e2535ff0926d09cb027bb6750a05595a..3c5eea3df3288bed9a51b3bb4daa9c803313d9a5 100644 (file)
@@ -1655,6 +1655,8 @@ int uclogic_params_init(struct uclogic_params *params,
                break;
        case VID_PID(USB_VENDOR_ID_UGEE,
                     USB_DEVICE_ID_UGEE_PARBLO_A610_PRO):
+       case VID_PID(USB_VENDOR_ID_UGEE,
+                    USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO01_V2):
        case VID_PID(USB_VENDOR_ID_UGEE,
                     USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO_L):
        case VID_PID(USB_VENDOR_ID_UGEE,
index 40554c8daca07bf4e0d78b05c22d30ceff82e2c4..00046cbfd4ed07bef8e8893e1723c4d62c46ea16 100644 (file)
@@ -104,6 +104,11 @@ void *ishtp_cl_get_dma_send_buf(struct ishtp_device *dev,
        int required_slots = (size / DMA_SLOT_SIZE)
                + 1 * (size % DMA_SLOT_SIZE != 0);
 
+       if (!dev->ishtp_dma_tx_map) {
+               dev_err(dev->devc, "Fail to allocate Tx map\n");
+               return NULL;
+       }
+
        spin_lock_irqsave(&dev->ishtp_dma_tx_lock, flags);
        for (i = 0; i <= (dev->ishtp_dma_num_slots - required_slots); i++) {
                free = 1;
@@ -150,6 +155,11 @@ void ishtp_cl_release_dma_acked_mem(struct ishtp_device *dev,
                return;
        }
 
+       if (!dev->ishtp_dma_tx_map) {
+               dev_err(dev->devc, "Fail to allocate Tx map\n");
+               return;
+       }
+
        i = (msg_addr - dev->ishtp_host_dma_tx_buf) / DMA_SLOT_SIZE;
        spin_lock_irqsave(&dev->ishtp_dma_tx_lock, flags);
        for (j = 0; j < acked_slots; j++) {
index 26b021f43ba40fbe930c95cdbd3fab30e5765eb8..11b1c1603aeb44e6d0dcbc3904a57a5bf314d03e 100644 (file)
@@ -2957,15 +2957,18 @@ EXPORT_SYMBOL(__rdma_block_iter_start);
 bool __rdma_block_iter_next(struct ib_block_iter *biter)
 {
        unsigned int block_offset;
+       unsigned int sg_delta;
 
        if (!biter->__sg_nents || !biter->__sg)
                return false;
 
        biter->__dma_addr = sg_dma_address(biter->__sg) + biter->__sg_advance;
        block_offset = biter->__dma_addr & (BIT_ULL(biter->__pg_bit) - 1);
-       biter->__sg_advance += BIT_ULL(biter->__pg_bit) - block_offset;
+       sg_delta = BIT_ULL(biter->__pg_bit) - block_offset;
 
-       if (biter->__sg_advance >= sg_dma_len(biter->__sg)) {
+       if (sg_dma_len(biter->__sg) - biter->__sg_advance > sg_delta) {
+               biter->__sg_advance += sg_delta;
+       } else {
                biter->__sg_advance = 0;
                biter->__sg = sg_next(biter->__sg);
                biter->__sg_nents--;
index 186d3029126069b9f3184dc055831d5937e80429..b02f2f0809c81312a4722ddbe7a8f43722633884 100644 (file)
@@ -23,18 +23,25 @@ static void cacheless_tid_rb_remove(struct hfi1_filedata *fdata,
 static bool tid_rb_invalidate(struct mmu_interval_notifier *mni,
                              const struct mmu_notifier_range *range,
                              unsigned long cur_seq);
+static bool tid_cover_invalidate(struct mmu_interval_notifier *mni,
+                                const struct mmu_notifier_range *range,
+                                unsigned long cur_seq);
 static int program_rcvarray(struct hfi1_filedata *fd, struct tid_user_buf *,
                            struct tid_group *grp,
                            unsigned int start, u16 count,
                            u32 *tidlist, unsigned int *tididx,
                            unsigned int *pmapped);
-static int unprogram_rcvarray(struct hfi1_filedata *fd, u32 tidinfo,
-                             struct tid_group **grp);
+static int unprogram_rcvarray(struct hfi1_filedata *fd, u32 tidinfo);
+static void __clear_tid_node(struct hfi1_filedata *fd,
+                            struct tid_rb_node *node);
 static void clear_tid_node(struct hfi1_filedata *fd, struct tid_rb_node *node);
 
 static const struct mmu_interval_notifier_ops tid_mn_ops = {
        .invalidate = tid_rb_invalidate,
 };
+static const struct mmu_interval_notifier_ops tid_cover_ops = {
+       .invalidate = tid_cover_invalidate,
+};
 
 /*
  * Initialize context and file private data needed for Expected
@@ -253,53 +260,65 @@ int hfi1_user_exp_rcv_setup(struct hfi1_filedata *fd,
                tididx = 0, mapped, mapped_pages = 0;
        u32 *tidlist = NULL;
        struct tid_user_buf *tidbuf;
+       unsigned long mmu_seq = 0;
 
        if (!PAGE_ALIGNED(tinfo->vaddr))
                return -EINVAL;
+       if (tinfo->length == 0)
+               return -EINVAL;
 
        tidbuf = kzalloc(sizeof(*tidbuf), GFP_KERNEL);
        if (!tidbuf)
                return -ENOMEM;
 
+       mutex_init(&tidbuf->cover_mutex);
        tidbuf->vaddr = tinfo->vaddr;
        tidbuf->length = tinfo->length;
        tidbuf->psets = kcalloc(uctxt->expected_count, sizeof(*tidbuf->psets),
                                GFP_KERNEL);
        if (!tidbuf->psets) {
-               kfree(tidbuf);
-               return -ENOMEM;
+               ret = -ENOMEM;
+               goto fail_release_mem;
+       }
+
+       if (fd->use_mn) {
+               ret = mmu_interval_notifier_insert(
+                       &tidbuf->notifier, current->mm,
+                       tidbuf->vaddr, tidbuf->npages * PAGE_SIZE,
+                       &tid_cover_ops);
+               if (ret)
+                       goto fail_release_mem;
+               mmu_seq = mmu_interval_read_begin(&tidbuf->notifier);
        }
 
        pinned = pin_rcv_pages(fd, tidbuf);
        if (pinned <= 0) {
-               kfree(tidbuf->psets);
-               kfree(tidbuf);
-               return pinned;
+               ret = (pinned < 0) ? pinned : -ENOSPC;
+               goto fail_unpin;
        }
 
        /* Find sets of physically contiguous pages */
        tidbuf->n_psets = find_phys_blocks(tidbuf, pinned);
 
-       /*
-        * We don't need to access this under a lock since tid_used is per
-        * process and the same process cannot be in hfi1_user_exp_rcv_clear()
-        * and hfi1_user_exp_rcv_setup() at the same time.
-        */
+       /* Reserve the number of expected tids to be used. */
        spin_lock(&fd->tid_lock);
        if (fd->tid_used + tidbuf->n_psets > fd->tid_limit)
                pageset_count = fd->tid_limit - fd->tid_used;
        else
                pageset_count = tidbuf->n_psets;
+       fd->tid_used += pageset_count;
        spin_unlock(&fd->tid_lock);
 
-       if (!pageset_count)
-               goto bail;
+       if (!pageset_count) {
+               ret = -ENOSPC;
+               goto fail_unreserve;
+       }
 
        ngroups = pageset_count / dd->rcv_entries.group_size;
        tidlist = kcalloc(pageset_count, sizeof(*tidlist), GFP_KERNEL);
        if (!tidlist) {
                ret = -ENOMEM;
-               goto nomem;
+               goto fail_unreserve;
        }
 
        tididx = 0;
@@ -395,43 +414,78 @@ int hfi1_user_exp_rcv_setup(struct hfi1_filedata *fd,
        }
 unlock:
        mutex_unlock(&uctxt->exp_mutex);
-nomem:
        hfi1_cdbg(TID, "total mapped: tidpairs:%u pages:%u (%d)", tididx,
                  mapped_pages, ret);
-       if (tididx) {
-               spin_lock(&fd->tid_lock);
-               fd->tid_used += tididx;
-               spin_unlock(&fd->tid_lock);
-               tinfo->tidcnt = tididx;
-               tinfo->length = mapped_pages * PAGE_SIZE;
-
-               if (copy_to_user(u64_to_user_ptr(tinfo->tidlist),
-                                tidlist, sizeof(tidlist[0]) * tididx)) {
-                       /*
-                        * On failure to copy to the user level, we need to undo
-                        * everything done so far so we don't leak resources.
-                        */
-                       tinfo->tidlist = (unsigned long)&tidlist;
-                       hfi1_user_exp_rcv_clear(fd, tinfo);
-                       tinfo->tidlist = 0;
-                       ret = -EFAULT;
-                       goto bail;
+
+       /* fail if nothing was programmed, set error if none provided */
+       if (tididx == 0) {
+               if (ret >= 0)
+                       ret = -ENOSPC;
+               goto fail_unreserve;
+       }
+
+       /* adjust reserved tid_used to actual count */
+       spin_lock(&fd->tid_lock);
+       fd->tid_used -= pageset_count - tididx;
+       spin_unlock(&fd->tid_lock);
+
+       /* unpin all pages not covered by a TID */
+       unpin_rcv_pages(fd, tidbuf, NULL, mapped_pages, pinned - mapped_pages,
+                       false);
+
+       if (fd->use_mn) {
+               /* check for an invalidate during setup */
+               bool fail = false;
+
+               mutex_lock(&tidbuf->cover_mutex);
+               fail = mmu_interval_read_retry(&tidbuf->notifier, mmu_seq);
+               mutex_unlock(&tidbuf->cover_mutex);
+
+               if (fail) {
+                       ret = -EBUSY;
+                       goto fail_unprogram;
                }
        }
 
-       /*
-        * If not everything was mapped (due to insufficient RcvArray entries,
-        * for example), unpin all unmapped pages so we can pin them nex time.
-        */
-       if (mapped_pages != pinned)
-               unpin_rcv_pages(fd, tidbuf, NULL, mapped_pages,
-                               (pinned - mapped_pages), false);
-bail:
+       tinfo->tidcnt = tididx;
+       tinfo->length = mapped_pages * PAGE_SIZE;
+
+       if (copy_to_user(u64_to_user_ptr(tinfo->tidlist),
+                        tidlist, sizeof(tidlist[0]) * tididx)) {
+               ret = -EFAULT;
+               goto fail_unprogram;
+       }
+
+       if (fd->use_mn)
+               mmu_interval_notifier_remove(&tidbuf->notifier);
+       kfree(tidbuf->pages);
        kfree(tidbuf->psets);
+       kfree(tidbuf);
        kfree(tidlist);
+       return 0;
+
+fail_unprogram:
+       /* unprogram, unmap, and unpin all allocated TIDs */
+       tinfo->tidlist = (unsigned long)tidlist;
+       hfi1_user_exp_rcv_clear(fd, tinfo);
+       tinfo->tidlist = 0;
+       pinned = 0;             /* nothing left to unpin */
+       pageset_count = 0;      /* nothing left reserved */
+fail_unreserve:
+       spin_lock(&fd->tid_lock);
+       fd->tid_used -= pageset_count;
+       spin_unlock(&fd->tid_lock);
+fail_unpin:
+       if (fd->use_mn)
+               mmu_interval_notifier_remove(&tidbuf->notifier);
+       if (pinned > 0)
+               unpin_rcv_pages(fd, tidbuf, NULL, 0, pinned, false);
+fail_release_mem:
        kfree(tidbuf->pages);
+       kfree(tidbuf->psets);
        kfree(tidbuf);
-       return ret > 0 ? 0 : ret;
+       kfree(tidlist);
+       return ret;
 }
 
 int hfi1_user_exp_rcv_clear(struct hfi1_filedata *fd,
@@ -452,7 +506,7 @@ int hfi1_user_exp_rcv_clear(struct hfi1_filedata *fd,
 
        mutex_lock(&uctxt->exp_mutex);
        for (tididx = 0; tididx < tinfo->tidcnt; tididx++) {
-               ret = unprogram_rcvarray(fd, tidinfo[tididx], NULL);
+               ret = unprogram_rcvarray(fd, tidinfo[tididx]);
                if (ret) {
                        hfi1_cdbg(TID, "Failed to unprogram rcv array %d",
                                  ret);
@@ -706,6 +760,7 @@ static int set_rcvarray_entry(struct hfi1_filedata *fd,
        }
 
        node->fdata = fd;
+       mutex_init(&node->invalidate_mutex);
        node->phys = page_to_phys(pages[0]);
        node->npages = npages;
        node->rcventry = rcventry;
@@ -721,11 +776,6 @@ static int set_rcvarray_entry(struct hfi1_filedata *fd,
                        &tid_mn_ops);
                if (ret)
                        goto out_unmap;
-               /*
-                * FIXME: This is in the wrong order, the notifier should be
-                * established before the pages are pinned by pin_rcv_pages.
-                */
-               mmu_interval_read_begin(&node->notifier);
        }
        fd->entry_to_rb[node->rcventry - uctxt->expected_base] = node;
 
@@ -745,8 +795,7 @@ out_unmap:
        return -EFAULT;
 }
 
-static int unprogram_rcvarray(struct hfi1_filedata *fd, u32 tidinfo,
-                             struct tid_group **grp)
+static int unprogram_rcvarray(struct hfi1_filedata *fd, u32 tidinfo)
 {
        struct hfi1_ctxtdata *uctxt = fd->uctxt;
        struct hfi1_devdata *dd = uctxt->dd;
@@ -769,9 +818,6 @@ static int unprogram_rcvarray(struct hfi1_filedata *fd, u32 tidinfo,
        if (!node || node->rcventry != (uctxt->expected_base + rcventry))
                return -EBADF;
 
-       if (grp)
-               *grp = node->grp;
-
        if (fd->use_mn)
                mmu_interval_notifier_remove(&node->notifier);
        cacheless_tid_rb_remove(fd, node);
@@ -779,23 +825,34 @@ static int unprogram_rcvarray(struct hfi1_filedata *fd, u32 tidinfo,
        return 0;
 }
 
-static void clear_tid_node(struct hfi1_filedata *fd, struct tid_rb_node *node)
+static void __clear_tid_node(struct hfi1_filedata *fd, struct tid_rb_node *node)
 {
        struct hfi1_ctxtdata *uctxt = fd->uctxt;
        struct hfi1_devdata *dd = uctxt->dd;
 
+       mutex_lock(&node->invalidate_mutex);
+       if (node->freed)
+               goto done;
+       node->freed = true;
+
        trace_hfi1_exp_tid_unreg(uctxt->ctxt, fd->subctxt, node->rcventry,
                                 node->npages,
                                 node->notifier.interval_tree.start, node->phys,
                                 node->dma_addr);
 
-       /*
-        * Make sure device has seen the write before we unpin the
-        * pages.
-        */
+       /* Make sure device has seen the write before pages are unpinned */
        hfi1_put_tid(dd, node->rcventry, PT_INVALID_FLUSH, 0, 0);
 
        unpin_rcv_pages(fd, NULL, node, 0, node->npages, true);
+done:
+       mutex_unlock(&node->invalidate_mutex);
+}
+
+static void clear_tid_node(struct hfi1_filedata *fd, struct tid_rb_node *node)
+{
+       struct hfi1_ctxtdata *uctxt = fd->uctxt;
+
+       __clear_tid_node(fd, node);
 
        node->grp->used--;
        node->grp->map &= ~(1 << (node->rcventry - node->grp->base));
@@ -854,10 +911,16 @@ static bool tid_rb_invalidate(struct mmu_interval_notifier *mni,
        if (node->freed)
                return true;
 
+       /* take action only if unmapping */
+       if (range->event != MMU_NOTIFY_UNMAP)
+               return true;
+
        trace_hfi1_exp_tid_inval(uctxt->ctxt, fdata->subctxt,
                                 node->notifier.interval_tree.start,
                                 node->rcventry, node->npages, node->dma_addr);
-       node->freed = true;
+
+       /* clear the hardware rcvarray entry */
+       __clear_tid_node(fdata, node);
 
        spin_lock(&fdata->invalid_lock);
        if (fdata->invalid_tid_idx < uctxt->expected_count) {
@@ -887,6 +950,23 @@ static bool tid_rb_invalidate(struct mmu_interval_notifier *mni,
        return true;
 }
 
+static bool tid_cover_invalidate(struct mmu_interval_notifier *mni,
+                                const struct mmu_notifier_range *range,
+                                unsigned long cur_seq)
+{
+       struct tid_user_buf *tidbuf =
+               container_of(mni, struct tid_user_buf, notifier);
+
+       /* take action only if unmapping */
+       if (range->event == MMU_NOTIFY_UNMAP) {
+               mutex_lock(&tidbuf->cover_mutex);
+               mmu_interval_set_seq(mni, cur_seq);
+               mutex_unlock(&tidbuf->cover_mutex);
+       }
+
+       return true;
+}
+
 static void cacheless_tid_rb_remove(struct hfi1_filedata *fdata,
                                    struct tid_rb_node *tnode)
 {
index 8c53e416bf843cb662d5abab6e1ea03d11f5fc39..f8ee997d0050ee91bbeaa9384f0a0c6aebd3f9ed 100644 (file)
@@ -16,6 +16,8 @@ struct tid_pageset {
 };
 
 struct tid_user_buf {
+       struct mmu_interval_notifier notifier;
+       struct mutex cover_mutex;
        unsigned long vaddr;
        unsigned long length;
        unsigned int npages;
@@ -27,6 +29,7 @@ struct tid_user_buf {
 struct tid_rb_node {
        struct mmu_interval_notifier notifier;
        struct hfi1_filedata *fdata;
+       struct mutex invalidate_mutex; /* covers hw removal */
        unsigned long phys;
        struct tid_group *grp;
        u32 rcventry;
index 945758f39523659413417abdd3143da5ee29ccd2..3e1272695d993bff459db4b2035b4ca40fb51e51 100644 (file)
@@ -278,7 +278,6 @@ static int do_get_hw_stats(struct ib_device *ibdev,
        const struct mlx5_ib_counters *cnts = get_counters(dev, port_num - 1);
        struct mlx5_core_dev *mdev;
        int ret, num_counters;
-       u32 mdev_port_num;
 
        if (!stats)
                return -EINVAL;
@@ -299,8 +298,9 @@ static int do_get_hw_stats(struct ib_device *ibdev,
        }
 
        if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) {
-               mdev = mlx5_ib_get_native_port_mdev(dev, port_num,
-                                                   &mdev_port_num);
+               if (!port_num)
+                       port_num = 1;
+               mdev = mlx5_ib_get_native_port_mdev(dev, port_num, NULL);
                if (!mdev) {
                        /* If port is not affiliated yet, its in down state
                         * which doesn't have any counters yet, so it would be
index 40d9410ec303343b1470b74428ee3f5db7ebf8b0..cf953d23d18dae8da53d51580e90e8f0f5eb8c9b 100644 (file)
@@ -4502,6 +4502,40 @@ static bool mlx5_ib_modify_qp_allowed(struct mlx5_ib_dev *dev,
        return false;
 }
 
+static int validate_rd_atomic(struct mlx5_ib_dev *dev, struct ib_qp_attr *attr,
+                             int attr_mask, enum ib_qp_type qp_type)
+{
+       int log_max_ra_res;
+       int log_max_ra_req;
+
+       if (qp_type == MLX5_IB_QPT_DCI) {
+               log_max_ra_res = 1 << MLX5_CAP_GEN(dev->mdev,
+                                                  log_max_ra_res_dc);
+               log_max_ra_req = 1 << MLX5_CAP_GEN(dev->mdev,
+                                                  log_max_ra_req_dc);
+       } else {
+               log_max_ra_res = 1 << MLX5_CAP_GEN(dev->mdev,
+                                                  log_max_ra_res_qp);
+               log_max_ra_req = 1 << MLX5_CAP_GEN(dev->mdev,
+                                                  log_max_ra_req_qp);
+       }
+
+       if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
+           attr->max_rd_atomic > log_max_ra_res) {
+               mlx5_ib_dbg(dev, "invalid max_rd_atomic value %d\n",
+                           attr->max_rd_atomic);
+               return false;
+       }
+
+       if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
+           attr->max_dest_rd_atomic > log_max_ra_req) {
+               mlx5_ib_dbg(dev, "invalid max_dest_rd_atomic value %d\n",
+                           attr->max_dest_rd_atomic);
+               return false;
+       }
+       return true;
+}
+
 int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                      int attr_mask, struct ib_udata *udata)
 {
@@ -4589,21 +4623,8 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                goto out;
        }
 
-       if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
-           attr->max_rd_atomic >
-           (1 << MLX5_CAP_GEN(dev->mdev, log_max_ra_res_qp))) {
-               mlx5_ib_dbg(dev, "invalid max_rd_atomic value %d\n",
-                           attr->max_rd_atomic);
-               goto out;
-       }
-
-       if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
-           attr->max_dest_rd_atomic >
-           (1 << MLX5_CAP_GEN(dev->mdev, log_max_ra_req_qp))) {
-               mlx5_ib_dbg(dev, "invalid max_dest_rd_atomic value %d\n",
-                           attr->max_dest_rd_atomic);
+       if (!validate_rd_atomic(dev, attr, attr_mask, qp_type))
                goto out;
-       }
 
        if (cur_state == new_state && cur_state == IB_QPS_RESET) {
                err = 0;
index a754fc902e3d190c1fbdf7ab3ecdeb59fef266de..7b41d79e72b2dffc7709838046fbc43081d46592 100644 (file)
@@ -98,11 +98,11 @@ enum rxe_device_param {
        RXE_MAX_SRQ                     = DEFAULT_MAX_VALUE - RXE_MIN_SRQ_INDEX,
 
        RXE_MIN_MR_INDEX                = 0x00000001,
-       RXE_MAX_MR_INDEX                = DEFAULT_MAX_VALUE,
-       RXE_MAX_MR                      = DEFAULT_MAX_VALUE - RXE_MIN_MR_INDEX,
-       RXE_MIN_MW_INDEX                = 0x00010001,
-       RXE_MAX_MW_INDEX                = 0x00020000,
-       RXE_MAX_MW                      = 0x00001000,
+       RXE_MAX_MR_INDEX                = DEFAULT_MAX_VALUE >> 1,
+       RXE_MAX_MR                      = RXE_MAX_MR_INDEX - RXE_MIN_MR_INDEX,
+       RXE_MIN_MW_INDEX                = RXE_MAX_MR_INDEX + 1,
+       RXE_MAX_MW_INDEX                = DEFAULT_MAX_VALUE,
+       RXE_MAX_MW                      = RXE_MAX_MW_INDEX - RXE_MIN_MW_INDEX,
 
        RXE_MAX_PKT_PER_ACK             = 64,
 
index f50620f5a0a14103d6ccf67316cd6b89ad03642d..1151c0b5cceaba60311d4ce91da85c69c7701eee 100644 (file)
@@ -23,16 +23,16 @@ static const struct rxe_type_info {
                .size           = sizeof(struct rxe_ucontext),
                .elem_offset    = offsetof(struct rxe_ucontext, elem),
                .min_index      = 1,
-               .max_index      = UINT_MAX,
-               .max_elem       = UINT_MAX,
+               .max_index      = RXE_MAX_UCONTEXT,
+               .max_elem       = RXE_MAX_UCONTEXT,
        },
        [RXE_TYPE_PD] = {
                .name           = "pd",
                .size           = sizeof(struct rxe_pd),
                .elem_offset    = offsetof(struct rxe_pd, elem),
                .min_index      = 1,
-               .max_index      = UINT_MAX,
-               .max_elem       = UINT_MAX,
+               .max_index      = RXE_MAX_PD,
+               .max_elem       = RXE_MAX_PD,
        },
        [RXE_TYPE_AH] = {
                .name           = "ah",
@@ -40,7 +40,7 @@ static const struct rxe_type_info {
                .elem_offset    = offsetof(struct rxe_ah, elem),
                .min_index      = RXE_MIN_AH_INDEX,
                .max_index      = RXE_MAX_AH_INDEX,
-               .max_elem       = RXE_MAX_AH_INDEX - RXE_MIN_AH_INDEX + 1,
+               .max_elem       = RXE_MAX_AH,
        },
        [RXE_TYPE_SRQ] = {
                .name           = "srq",
@@ -49,7 +49,7 @@ static const struct rxe_type_info {
                .cleanup        = rxe_srq_cleanup,
                .min_index      = RXE_MIN_SRQ_INDEX,
                .max_index      = RXE_MAX_SRQ_INDEX,
-               .max_elem       = RXE_MAX_SRQ_INDEX - RXE_MIN_SRQ_INDEX + 1,
+               .max_elem       = RXE_MAX_SRQ,
        },
        [RXE_TYPE_QP] = {
                .name           = "qp",
@@ -58,7 +58,7 @@ static const struct rxe_type_info {
                .cleanup        = rxe_qp_cleanup,
                .min_index      = RXE_MIN_QP_INDEX,
                .max_index      = RXE_MAX_QP_INDEX,
-               .max_elem       = RXE_MAX_QP_INDEX - RXE_MIN_QP_INDEX + 1,
+               .max_elem       = RXE_MAX_QP,
        },
        [RXE_TYPE_CQ] = {
                .name           = "cq",
@@ -66,8 +66,8 @@ static const struct rxe_type_info {
                .elem_offset    = offsetof(struct rxe_cq, elem),
                .cleanup        = rxe_cq_cleanup,
                .min_index      = 1,
-               .max_index      = UINT_MAX,
-               .max_elem       = UINT_MAX,
+               .max_index      = RXE_MAX_CQ,
+               .max_elem       = RXE_MAX_CQ,
        },
        [RXE_TYPE_MR] = {
                .name           = "mr",
@@ -76,7 +76,7 @@ static const struct rxe_type_info {
                .cleanup        = rxe_mr_cleanup,
                .min_index      = RXE_MIN_MR_INDEX,
                .max_index      = RXE_MAX_MR_INDEX,
-               .max_elem       = RXE_MAX_MR_INDEX - RXE_MIN_MR_INDEX + 1,
+               .max_elem       = RXE_MAX_MR,
        },
        [RXE_TYPE_MW] = {
                .name           = "mw",
@@ -85,7 +85,7 @@ static const struct rxe_type_info {
                .cleanup        = rxe_mw_cleanup,
                .min_index      = RXE_MIN_MW_INDEX,
                .max_index      = RXE_MAX_MW_INDEX,
-               .max_elem       = RXE_MAX_MW_INDEX - RXE_MIN_MW_INDEX + 1,
+               .max_elem       = RXE_MAX_MW,
        },
 };
 
index 00b0068fda208b5cec93658c1eaa58d999a58367..5d94db453df322abfe9496f6b9f24076a45b2bdd 100644 (file)
@@ -62,9 +62,6 @@ enum {
        SRP_DEFAULT_CMD_SQ_SIZE = SRP_DEFAULT_QUEUE_SIZE - SRP_RSP_SQ_SIZE -
                                  SRP_TSK_MGMT_SQ_SIZE,
 
-       SRP_TAG_NO_REQ          = ~0U,
-       SRP_TAG_TSK_MGMT        = 1U << 31,
-
        SRP_MAX_PAGES_PER_MR    = 512,
 
        SRP_MAX_ADD_CDB_LEN     = 16,
@@ -79,6 +76,11 @@ enum {
                                  sizeof(struct srp_imm_buf),
 };
 
+enum {
+       SRP_TAG_NO_REQ          = ~0U,
+       SRP_TAG_TSK_MGMT        = BIT(31),
+};
+
 enum srp_target_state {
        SRP_TARGET_SCANNING,
        SRP_TARGET_LIVE,
index 8d8ebdc2039b8eb68338c725ff77c34fdac77458..67f1c7364c95d3814de41e99c2ec3d6fdc61a79a 100644 (file)
@@ -51,7 +51,7 @@ module_param_array(ptr_size, int, NULL, 0444);
 MODULE_PARM_DESC(ptr_size,
        "Pointing device width, height in pixels (default 800,600)");
 
-static int xenkbd_remove(struct xenbus_device *);
+static void xenkbd_remove(struct xenbus_device *);
 static int xenkbd_connect_backend(struct xenbus_device *, struct xenkbd_info *);
 static void xenkbd_disconnect_backend(struct xenkbd_info *);
 
@@ -404,7 +404,7 @@ static int xenkbd_resume(struct xenbus_device *dev)
        return xenkbd_connect_backend(dev, info);
 }
 
-static int xenkbd_remove(struct xenbus_device *dev)
+static void xenkbd_remove(struct xenbus_device *dev)
 {
        struct xenkbd_info *info = dev_get_drvdata(&dev->dev);
 
@@ -417,7 +417,6 @@ static int xenkbd_remove(struct xenbus_device *dev)
                input_unregister_device(info->mtouch);
        free_page((unsigned long)info->page);
        kfree(info);
-       return 0;
 }
 
 static int xenkbd_connect_backend(struct xenbus_device *dev,
index ba6781f54ab73a6d37de76272a694161710e1ac2..df3196f7253687248bbd00fe2099e6d531171f38 100644 (file)
@@ -488,7 +488,7 @@ int qnoc_probe(struct platform_device *pdev)
        }
 
 regmap_done:
-       ret = devm_clk_bulk_get(dev, qp->num_clks, qp->bus_clks);
+       ret = devm_clk_bulk_get_optional(dev, qp->num_clks, qp->bus_clks);
        if (ret)
                return ret;
 
index c2903ae3b3bc3708fd325c12a5fd54e158186613..25a1a32bc611facde621e72729c798e4623040b1 100644 (file)
@@ -33,6 +33,13 @@ static const char * const bus_a0noc_clocks[] = {
        "aggre0_noc_mpu_cfg"
 };
 
+static const char * const bus_a2noc_clocks[] = {
+       "bus",
+       "bus_a",
+       "aggre2_ufs_axi",
+       "ufs_axi"
+};
+
 static const u16 mas_a0noc_common_links[] = {
        MSM8996_SLAVE_A0NOC_SNOC
 };
@@ -1806,7 +1813,7 @@ static const struct regmap_config msm8996_a0noc_regmap_config = {
        .reg_bits       = 32,
        .reg_stride     = 4,
        .val_bits       = 32,
-       .max_register   = 0x9000,
+       .max_register   = 0x6000,
        .fast_io        = true
 };
 
@@ -1830,7 +1837,7 @@ static const struct regmap_config msm8996_a1noc_regmap_config = {
        .reg_bits       = 32,
        .reg_stride     = 4,
        .val_bits       = 32,
-       .max_register   = 0x7000,
+       .max_register   = 0x5000,
        .fast_io        = true
 };
 
@@ -1851,7 +1858,7 @@ static const struct regmap_config msm8996_a2noc_regmap_config = {
        .reg_bits       = 32,
        .reg_stride     = 4,
        .val_bits       = 32,
-       .max_register   = 0xa000,
+       .max_register   = 0x7000,
        .fast_io        = true
 };
 
@@ -1859,6 +1866,8 @@ static const struct qcom_icc_desc msm8996_a2noc = {
        .type = QCOM_ICC_NOC,
        .nodes = a2noc_nodes,
        .num_nodes = ARRAY_SIZE(a2noc_nodes),
+       .clocks = bus_a2noc_clocks,
+       .num_clocks = ARRAY_SIZE(bus_a2noc_clocks),
        .regmap_cfg = &msm8996_a2noc_regmap_config
 };
 
@@ -1877,7 +1886,7 @@ static const struct regmap_config msm8996_bimc_regmap_config = {
        .reg_bits       = 32,
        .reg_stride     = 4,
        .val_bits       = 32,
-       .max_register   = 0x62000,
+       .max_register   = 0x5a000,
        .fast_io        = true
 };
 
@@ -1988,7 +1997,7 @@ static const struct regmap_config msm8996_mnoc_regmap_config = {
        .reg_bits       = 32,
        .reg_stride     = 4,
        .val_bits       = 32,
-       .max_register   = 0x20000,
+       .max_register   = 0x1c000,
        .fast_io        = true
 };
 
index ab160198edd6b1d861af9459632d7da020d18ca9..f2425b0f0cd6252ba284576a5c77d45c05dbe401 100644 (file)
@@ -3858,7 +3858,9 @@ static int arm_smmu_device_remove(struct platform_device *pdev)
 
 static void arm_smmu_device_shutdown(struct platform_device *pdev)
 {
-       arm_smmu_device_remove(pdev);
+       struct arm_smmu_device *smmu = platform_get_drvdata(pdev);
+
+       arm_smmu_device_disable(smmu);
 }
 
 static const struct of_device_id arm_smmu_of_match[] = {
index 719fbca1fe52a0b329ccac1c3f8f1001b11194c7..2ff7a72cf3772ef29150248e637ab0b2ce91c320 100644 (file)
@@ -1316,8 +1316,14 @@ static bool arm_smmu_capable(struct device *dev, enum iommu_cap cap)
 
        switch (cap) {
        case IOMMU_CAP_CACHE_COHERENCY:
-               /* Assume that a coherent TCU implies coherent TBUs */
-               return cfg->smmu->features & ARM_SMMU_FEAT_COHERENT_WALK;
+               /*
+                * It's overwhelmingly the case in practice that when the pagetable
+                * walk interface is connected to a coherent interconnect, all the
+                * translation interfaces are too. Furthermore if the device is
+                * natively coherent, then its translation interface must also be.
+                */
+               return cfg->smmu->features & ARM_SMMU_FEAT_COHERENT_WALK ||
+                       device_get_dma_attr(dev) == DEV_DMA_COHERENT;
        case IOMMU_CAP_NOEXEC:
                return true;
        default:
@@ -2185,19 +2191,16 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
        return 0;
 }
 
-static int arm_smmu_device_remove(struct platform_device *pdev)
+static void arm_smmu_device_shutdown(struct platform_device *pdev)
 {
        struct arm_smmu_device *smmu = platform_get_drvdata(pdev);
 
        if (!smmu)
-               return -ENODEV;
+               return;
 
        if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
                dev_notice(&pdev->dev, "disabling translation\n");
 
-       iommu_device_unregister(&smmu->iommu);
-       iommu_device_sysfs_remove(&smmu->iommu);
-
        arm_smmu_rpm_get(smmu);
        /* Turn the thing off */
        arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_sCR0, ARM_SMMU_sCR0_CLIENTPD);
@@ -2209,12 +2212,21 @@ static int arm_smmu_device_remove(struct platform_device *pdev)
                clk_bulk_disable(smmu->num_clks, smmu->clks);
 
        clk_bulk_unprepare(smmu->num_clks, smmu->clks);
-       return 0;
 }
 
-static void arm_smmu_device_shutdown(struct platform_device *pdev)
+static int arm_smmu_device_remove(struct platform_device *pdev)
 {
-       arm_smmu_device_remove(pdev);
+       struct arm_smmu_device *smmu = platform_get_drvdata(pdev);
+
+       if (!smmu)
+               return -ENODEV;
+
+       iommu_device_unregister(&smmu->iommu);
+       iommu_device_sysfs_remove(&smmu->iommu);
+
+       arm_smmu_device_shutdown(pdev);
+
+       return 0;
 }
 
 static int __maybe_unused arm_smmu_runtime_resume(struct device *dev)
index de91dd88705bd3dc108ac4d68ed52445d7faa9ff..5f6a85aea501ecc8c2a390cc63f5cb232cbe6e95 100644 (file)
@@ -3185,14 +3185,16 @@ EXPORT_SYMBOL_GPL(iommu_group_claim_dma_owner);
  */
 int iommu_device_claim_dma_owner(struct device *dev, void *owner)
 {
-       struct iommu_group *group = iommu_group_get(dev);
+       struct iommu_group *group;
        int ret = 0;
 
-       if (!group)
-               return -ENODEV;
        if (WARN_ON(!owner))
                return -EINVAL;
 
+       group = iommu_group_get(dev);
+       if (!group)
+               return -ENODEV;
+
        mutex_lock(&group->mutex);
        if (group->owner_cnt) {
                if (group->owner != owner) {
index a44ad92fc5eb70e18ca09c9e16329d268804ffe0..fe452ce466429a702b4df515e3c03298d418f136 100644 (file)
@@ -197,7 +197,7 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
 
        curr = __get_cached_rbnode(iovad, limit_pfn);
        curr_iova = to_iova(curr);
-       retry_pfn = curr_iova->pfn_hi + 1;
+       retry_pfn = curr_iova->pfn_hi;
 
 retry:
        do {
@@ -211,7 +211,7 @@ retry:
        if (high_pfn < size || new_pfn < low_pfn) {
                if (low_pfn == iovad->start_pfn && retry_pfn < limit_pfn) {
                        high_pfn = limit_pfn;
-                       low_pfn = retry_pfn;
+                       low_pfn = retry_pfn + 1;
                        curr = iova_find_limit(iovad, limit_pfn);
                        curr_iova = to_iova(curr);
                        goto retry;
index 69682ee068d2bd88205c00fb12f1bfeda8dfa640..ca581ff1c76964554f6846e09627961b73a9f471 100644 (file)
@@ -683,7 +683,7 @@ static int mtk_iommu_v1_probe(struct platform_device *pdev)
        ret = iommu_device_sysfs_add(&data->iommu, &pdev->dev, NULL,
                                     dev_name(&pdev->dev));
        if (ret)
-               return ret;
+               goto out_clk_unprepare;
 
        ret = iommu_device_register(&data->iommu, &mtk_iommu_v1_ops, dev);
        if (ret)
@@ -698,6 +698,8 @@ out_dev_unreg:
        iommu_device_unregister(&data->iommu);
 out_sysfs_remove:
        iommu_device_sysfs_remove(&data->iommu);
+out_clk_unprepare:
+       clk_disable_unprepare(data->bclk);
        return ret;
 }
 
index e1ea3a7bd9d9f869beafc24ef67ae7757f107915..b424a6ee27baf22a7ccbf22019d34c74cbc3bf7b 100644 (file)
@@ -1742,6 +1742,8 @@ static void dm_split_and_process_bio(struct mapped_device *md,
                 * otherwise associated queue_limits won't be imposed.
                 */
                bio = bio_split_to_limits(bio);
+               if (!bio)
+                       return;
        }
 
        init_clone_info(&ci, md, map, bio, is_abnormal);
index 775f1dde190a2eea4acfb51bafd47203fb3d877a..02b0240e7c715a0a4a7dde9af66835fd55df16dc 100644 (file)
@@ -455,6 +455,8 @@ static void md_submit_bio(struct bio *bio)
        }
 
        bio = bio_split_to_limits(bio);
+       if (!bio)
+               return;
 
        if (mddev->ro == MD_RDONLY && unlikely(rw == WRITE)) {
                if (bio_sectors(bio) != 0)
@@ -3642,7 +3644,7 @@ EXPORT_SYMBOL_GPL(md_rdev_init);
  */
 static struct md_rdev *md_import_device(dev_t newdev, int super_format, int super_minor)
 {
-       static struct md_rdev *claim_rdev; /* just for claiming the bdev */
+       static struct md_rdev claim_rdev; /* just for claiming the bdev */
        struct md_rdev *rdev;
        sector_t size;
        int err;
@@ -3660,7 +3662,7 @@ static struct md_rdev *md_import_device(dev_t newdev, int super_format, int supe
 
        rdev->bdev = blkdev_get_by_dev(newdev,
                        FMODE_READ | FMODE_WRITE | FMODE_EXCL,
-                       super_format == -2 ? claim_rdev : rdev);
+                       super_format == -2 ? &claim_rdev : rdev);
        if (IS_ERR(rdev->bdev)) {
                pr_warn("md: could not open device unknown-block(%u,%u).\n",
                        MAJOR(newdev), MINOR(newdev));
index 9c49d00c2a966b2859e30d8699f6c1cfcd3f9caa..ea6e9e1eaf04687cd6a9cec822a72467782cbe4e 100644 (file)
@@ -47,19 +47,17 @@ static int atmel_ramc_probe(struct platform_device *pdev)
        caps = of_device_get_match_data(&pdev->dev);
 
        if (caps->has_ddrck) {
-               clk = devm_clk_get(&pdev->dev, "ddrck");
+               clk = devm_clk_get_enabled(&pdev->dev, "ddrck");
                if (IS_ERR(clk))
                        return PTR_ERR(clk);
-               clk_prepare_enable(clk);
        }
 
        if (caps->has_mpddr_clk) {
-               clk = devm_clk_get(&pdev->dev, "mpddr");
+               clk = devm_clk_get_enabled(&pdev->dev, "mpddr");
                if (IS_ERR(clk)) {
                        pr_err("AT91 RAMC: couldn't get mpddr clock\n");
                        return PTR_ERR(clk);
                }
-               clk_prepare_enable(clk);
        }
 
        return 0;
index 8450638e86700763d52bb85ec8131c27019bb33c..efc6c08db2b70a78ae2d6eb902305153fb8cf36e 100644 (file)
@@ -280,10 +280,9 @@ static int mvebu_devbus_probe(struct platform_device *pdev)
        if (IS_ERR(devbus->base))
                return PTR_ERR(devbus->base);
 
-       clk = devm_clk_get(&pdev->dev, NULL);
+       clk = devm_clk_get_enabled(&pdev->dev, NULL);
        if (IS_ERR(clk))
                return PTR_ERR(clk);
-       clk_prepare_enable(clk);
 
        /*
         * Obtain clock period in picoseconds,
index 57d9f91fe89bf9100a40f69f6df88d14e721e625..d78f73db37c88c887fddb062bd14562d9aad660d 100644 (file)
@@ -1918,7 +1918,8 @@ int gpmc_cs_program_settings(int cs, struct gpmc_settings *p)
                }
        }
 
-       if (p->wait_pin > gpmc_nr_waitpins) {
+       if (p->wait_pin != GPMC_WAITPIN_INVALID &&
+           p->wait_pin > gpmc_nr_waitpins) {
                pr_err("%s: invalid wait-pin (%d)\n", __func__, p->wait_pin);
                return -EINVAL;
        }
index 62477e592bf5f354cd6299f1b3e590a53811cb04..7bb73f06fad3edb3269744d5c3baa7744359233a 100644 (file)
 #define MC_SID_STREAMID_SECURITY_WRITE_ACCESS_DISABLED BIT(16)
 #define MC_SID_STREAMID_SECURITY_OVERRIDE BIT(8)
 
-static void tegra186_mc_program_sid(struct tegra_mc *mc)
-{
-       unsigned int i;
-
-       for (i = 0; i < mc->soc->num_clients; i++) {
-               const struct tegra_mc_client *client = &mc->soc->clients[i];
-               u32 override, security;
-
-               override = readl(mc->regs + client->regs.sid.override);
-               security = readl(mc->regs + client->regs.sid.security);
-
-               dev_dbg(mc->dev, "client %s: override: %x security: %x\n",
-                       client->name, override, security);
-
-               dev_dbg(mc->dev, "setting SID %u for %s\n", client->sid,
-                       client->name);
-               writel(client->sid, mc->regs + client->regs.sid.override);
-
-               override = readl(mc->regs + client->regs.sid.override);
-               security = readl(mc->regs + client->regs.sid.security);
-
-               dev_dbg(mc->dev, "client %s: override: %x security: %x\n",
-                       client->name, override, security);
-       }
-}
-
 static int tegra186_mc_probe(struct tegra_mc *mc)
 {
        struct platform_device *pdev = to_platform_device(mc->dev);
@@ -85,8 +59,6 @@ populate:
        if (err < 0)
                return err;
 
-       tegra186_mc_program_sid(mc);
-
        return 0;
 }
 
@@ -95,13 +67,6 @@ static void tegra186_mc_remove(struct tegra_mc *mc)
        of_platform_depopulate(mc->dev);
 }
 
-static int tegra186_mc_resume(struct tegra_mc *mc)
-{
-       tegra186_mc_program_sid(mc);
-
-       return 0;
-}
-
 #if IS_ENABLED(CONFIG_IOMMU_API)
 static void tegra186_mc_client_sid_override(struct tegra_mc *mc,
                                            const struct tegra_mc_client *client,
@@ -173,7 +138,6 @@ static int tegra186_mc_probe_device(struct tegra_mc *mc, struct device *dev)
 const struct tegra_mc_ops tegra186_mc_ops = {
        .probe = tegra186_mc_probe,
        .remove = tegra186_mc_remove,
-       .resume = tegra186_mc_resume,
        .probe_device = tegra186_mc_probe_device,
        .handle_irq = tegra30_mc_handle_irq,
 };
index c9902a1dcf5d311043182ee87f4dbc5ec6085cae..5310606113fe5c56d5b41ac7b065802bdee07f22 100644 (file)
@@ -321,7 +321,7 @@ static void fastrpc_free_map(struct kref *ref)
                        perm.vmid = QCOM_SCM_VMID_HLOS;
                        perm.perm = QCOM_SCM_PERM_RWX;
                        err = qcom_scm_assign_mem(map->phys, map->size,
-                               &(map->fl->cctx->vmperms[0].vmid), &perm, 1);
+                               &map->fl->cctx->perms, &perm, 1);
                        if (err) {
                                dev_err(map->fl->sctx->dev, "Failed to assign memory phys 0x%llx size 0x%llx err %d",
                                                map->phys, map->size, err);
@@ -334,6 +334,13 @@ static void fastrpc_free_map(struct kref *ref)
                dma_buf_put(map->buf);
        }
 
+       if (map->fl) {
+               spin_lock(&map->fl->lock);
+               list_del(&map->node);
+               spin_unlock(&map->fl->lock);
+               map->fl = NULL;
+       }
+
        kfree(map);
 }
 
@@ -343,38 +350,41 @@ static void fastrpc_map_put(struct fastrpc_map *map)
                kref_put(&map->refcount, fastrpc_free_map);
 }
 
-static void fastrpc_map_get(struct fastrpc_map *map)
+static int fastrpc_map_get(struct fastrpc_map *map)
 {
-       if (map)
-               kref_get(&map->refcount);
+       if (!map)
+               return -ENOENT;
+
+       return kref_get_unless_zero(&map->refcount) ? 0 : -ENOENT;
 }
 
 
 static int fastrpc_map_lookup(struct fastrpc_user *fl, int fd,
-                           struct fastrpc_map **ppmap)
+                           struct fastrpc_map **ppmap, bool take_ref)
 {
+       struct fastrpc_session_ctx *sess = fl->sctx;
        struct fastrpc_map *map = NULL;
+       int ret = -ENOENT;
 
-       mutex_lock(&fl->mutex);
+       spin_lock(&fl->lock);
        list_for_each_entry(map, &fl->maps, node) {
-               if (map->fd == fd) {
-                       *ppmap = map;
-                       mutex_unlock(&fl->mutex);
-                       return 0;
-               }
-       }
-       mutex_unlock(&fl->mutex);
-
-       return -ENOENT;
-}
+               if (map->fd != fd)
+                       continue;
 
-static int fastrpc_map_find(struct fastrpc_user *fl, int fd,
-                           struct fastrpc_map **ppmap)
-{
-       int ret = fastrpc_map_lookup(fl, fd, ppmap);
+               if (take_ref) {
+                       ret = fastrpc_map_get(map);
+                       if (ret) {
+                               dev_dbg(sess->dev, "%s: Failed to get map fd=%d ret=%d\n",
+                                       __func__, fd, ret);
+                               break;
+                       }
+               }
 
-       if (!ret)
-               fastrpc_map_get(*ppmap);
+               *ppmap = map;
+               ret = 0;
+               break;
+       }
+       spin_unlock(&fl->lock);
 
        return ret;
 }
@@ -746,7 +756,7 @@ static int fastrpc_map_create(struct fastrpc_user *fl, int fd,
        struct fastrpc_map *map = NULL;
        int err = 0;
 
-       if (!fastrpc_map_find(fl, fd, ppmap))
+       if (!fastrpc_map_lookup(fl, fd, ppmap, true))
                return 0;
 
        map = kzalloc(sizeof(*map), GFP_KERNEL);
@@ -788,10 +798,8 @@ static int fastrpc_map_create(struct fastrpc_user *fl, int fd,
                 * If subsystem VMIDs are defined in DTSI, then do
                 * hyp_assign from HLOS to those VM(s)
                 */
-               unsigned int perms = BIT(QCOM_SCM_VMID_HLOS);
-
                map->attr = attr;
-               err = qcom_scm_assign_mem(map->phys, (u64)map->size, &perms,
+               err = qcom_scm_assign_mem(map->phys, (u64)map->size, &fl->cctx->perms,
                                fl->cctx->vmperms, fl->cctx->vmcount);
                if (err) {
                        dev_err(sess->dev, "Failed to assign memory with phys 0x%llx size 0x%llx err %d",
@@ -1070,7 +1078,7 @@ static int fastrpc_put_args(struct fastrpc_invoke_ctx *ctx,
        for (i = 0; i < FASTRPC_MAX_FDLIST; i++) {
                if (!fdlist[i])
                        break;
-               if (!fastrpc_map_lookup(fl, (int)fdlist[i], &mmap))
+               if (!fastrpc_map_lookup(fl, (int)fdlist[i], &mmap, false))
                        fastrpc_map_put(mmap);
        }
 
@@ -1258,10 +1266,9 @@ static int fastrpc_init_create_static_process(struct fastrpc_user *fl,
 
                /* Map if we have any heap VMIDs associated with this ADSP Static Process. */
                if (fl->cctx->vmcount) {
-                       unsigned int perms = BIT(QCOM_SCM_VMID_HLOS);
-
                        err = qcom_scm_assign_mem(fl->cctx->remote_heap->phys,
-                                                       (u64)fl->cctx->remote_heap->size, &perms,
+                                                       (u64)fl->cctx->remote_heap->size,
+                                                       &fl->cctx->perms,
                                                        fl->cctx->vmperms, fl->cctx->vmcount);
                        if (err) {
                                dev_err(fl->sctx->dev, "Failed to assign memory with phys 0x%llx size 0x%llx err %d",
@@ -1309,7 +1316,7 @@ err_invoke:
                perm.perm = QCOM_SCM_PERM_RWX;
                err = qcom_scm_assign_mem(fl->cctx->remote_heap->phys,
                                                (u64)fl->cctx->remote_heap->size,
-                                               &(fl->cctx->vmperms[0].vmid), &perm, 1);
+                                               &fl->cctx->perms, &perm, 1);
                if (err)
                        dev_err(fl->sctx->dev, "Failed to assign memory phys 0x%llx size 0x%llx err %d",
                                fl->cctx->remote_heap->phys, fl->cctx->remote_heap->size, err);
@@ -1433,12 +1440,7 @@ err_invoke:
        fl->init_mem = NULL;
        fastrpc_buf_free(imem);
 err_alloc:
-       if (map) {
-               spin_lock(&fl->lock);
-               list_del(&map->node);
-               spin_unlock(&fl->lock);
-               fastrpc_map_put(map);
-       }
+       fastrpc_map_put(map);
 err:
        kfree(args);
 
@@ -1514,10 +1516,8 @@ static int fastrpc_device_release(struct inode *inode, struct file *file)
                fastrpc_context_put(ctx);
        }
 
-       list_for_each_entry_safe(map, m, &fl->maps, node) {
-               list_del(&map->node);
+       list_for_each_entry_safe(map, m, &fl->maps, node)
                fastrpc_map_put(map);
-       }
 
        list_for_each_entry_safe(buf, b, &fl->mmaps, node) {
                list_del(&buf->node);
@@ -1894,12 +1894,11 @@ static int fastrpc_req_mmap(struct fastrpc_user *fl, char __user *argp)
        /* Add memory to static PD pool, protection thru hypervisor */
        if (req.flags != ADSP_MMAP_REMOTE_HEAP_ADDR && fl->cctx->vmcount) {
                struct qcom_scm_vmperm perm;
-               int err = 0;
 
                perm.vmid = QCOM_SCM_VMID_HLOS;
                perm.perm = QCOM_SCM_PERM_RWX;
                err = qcom_scm_assign_mem(buf->phys, buf->size,
-                       &(fl->cctx->vmperms[0].vmid), &perm, 1);
+                       &fl->cctx->perms, &perm, 1);
                if (err) {
                        dev_err(fl->sctx->dev, "Failed to assign memory phys 0x%llx size 0x%llx err %d",
                                        buf->phys, buf->size, err);
index 4a08b624910a0a4114ba43f8b2c0dfdf6c47da1d..a81b890c7ee648255c099264bfccb214b2a56b1e 100644 (file)
@@ -702,13 +702,15 @@ void *mei_cldev_dma_map(struct mei_cl_device *cldev, u8 buffer_id, size_t size)
        if (cl->state == MEI_FILE_UNINITIALIZED) {
                ret = mei_cl_link(cl);
                if (ret)
-                       goto out;
+                       goto notlinked;
                /* update pointers */
                cl->cldev = cldev;
        }
 
        ret = mei_cl_dma_alloc_and_map(cl, NULL, buffer_id, size);
-out:
+       if (ret)
+               mei_cl_unlink(cl);
+notlinked:
        mutex_unlock(&bus->device_lock);
        if (ret)
                return ERR_PTR(ret);
@@ -758,7 +760,7 @@ int mei_cldev_enable(struct mei_cl_device *cldev)
        if (cl->state == MEI_FILE_UNINITIALIZED) {
                ret = mei_cl_link(cl);
                if (ret)
-                       goto out;
+                       goto notlinked;
                /* update pointers */
                cl->cldev = cldev;
        }
@@ -785,6 +787,9 @@ int mei_cldev_enable(struct mei_cl_device *cldev)
        }
 
 out:
+       if (ret)
+               mei_cl_unlink(cl);
+notlinked:
        mutex_unlock(&bus->device_lock);
 
        return ret;
@@ -1277,7 +1282,6 @@ static void mei_cl_bus_dev_release(struct device *dev)
        mei_cl_flush_queues(cldev->cl, NULL);
        mei_me_cl_put(cldev->me_cl);
        mei_dev_bus_put(cldev->bus);
-       mei_cl_unlink(cldev->cl);
        kfree(cldev->cl);
        kfree(cldev);
 }
index 99966cd3e7d892ca0f86666614bc7f664e978251..bdc65d50b945fc3c8f65f576880ce006f917559f 100644 (file)
 
 #define MEI_DEV_ID_RPL_S      0x7A68  /* Raptor Lake Point S */
 
+#define MEI_DEV_ID_MTL_M      0x7E70  /* Meteor Lake Point M */
+
 /*
  * MEI HW Section
  */
index 704cd0caa172caec9bbd459dbe15ae7db879364c..5bf0d50d55a00b47391f34ac52dee239fd8145bd 100644 (file)
@@ -118,6 +118,8 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
 
        {MEI_PCI_DEVICE(MEI_DEV_ID_RPL_S, MEI_ME_PCH15_CFG)},
 
+       {MEI_PCI_DEVICE(MEI_DEV_ID_MTL_M, MEI_ME_PCH15_CFG)},
+
        /* required last entry */
        {0, }
 };
index aa7b05de97dd52d84fa04615912af9df46f7fb5f..4f8d962bb5b2a388c80d03d5edbd280efcc325c6 100644 (file)
@@ -56,8 +56,6 @@ struct vmci_guest_device {
 
        bool exclusive_vectors;
 
-       struct tasklet_struct datagram_tasklet;
-       struct tasklet_struct bm_tasklet;
        struct wait_queue_head inout_wq;
 
        void *data_buffer;
@@ -304,9 +302,8 @@ static int vmci_check_host_caps(struct pci_dev *pdev)
  * This function assumes that it has exclusive access to the data
  * in register(s) for the duration of the call.
  */
-static void vmci_dispatch_dgs(unsigned long data)
+static void vmci_dispatch_dgs(struct vmci_guest_device *vmci_dev)
 {
-       struct vmci_guest_device *vmci_dev = (struct vmci_guest_device *)data;
        u8 *dg_in_buffer = vmci_dev->data_buffer;
        struct vmci_datagram *dg;
        size_t dg_in_buffer_size = VMCI_MAX_DG_SIZE;
@@ -465,10 +462,8 @@ static void vmci_dispatch_dgs(unsigned long data)
  * Scans the notification bitmap for raised flags, clears them
  * and handles the notifications.
  */
-static void vmci_process_bitmap(unsigned long data)
+static void vmci_process_bitmap(struct vmci_guest_device *dev)
 {
-       struct vmci_guest_device *dev = (struct vmci_guest_device *)data;
-
        if (!dev->notification_bitmap) {
                dev_dbg(dev->dev, "No bitmap present in %s\n", __func__);
                return;
@@ -486,13 +481,13 @@ static irqreturn_t vmci_interrupt(int irq, void *_dev)
        struct vmci_guest_device *dev = _dev;
 
        /*
-        * If we are using MSI-X with exclusive vectors then we simply schedule
-        * the datagram tasklet, since we know the interrupt was meant for us.
+        * If we are using MSI-X with exclusive vectors then we simply call
+        * vmci_dispatch_dgs(), since we know the interrupt was meant for us.
         * Otherwise we must read the ICR to determine what to do.
         */
 
        if (dev->exclusive_vectors) {
-               tasklet_schedule(&dev->datagram_tasklet);
+               vmci_dispatch_dgs(dev);
        } else {
                unsigned int icr;
 
@@ -502,12 +497,12 @@ static irqreturn_t vmci_interrupt(int irq, void *_dev)
                        return IRQ_NONE;
 
                if (icr & VMCI_ICR_DATAGRAM) {
-                       tasklet_schedule(&dev->datagram_tasklet);
+                       vmci_dispatch_dgs(dev);
                        icr &= ~VMCI_ICR_DATAGRAM;
                }
 
                if (icr & VMCI_ICR_NOTIFICATION) {
-                       tasklet_schedule(&dev->bm_tasklet);
+                       vmci_process_bitmap(dev);
                        icr &= ~VMCI_ICR_NOTIFICATION;
                }
 
@@ -536,7 +531,7 @@ static irqreturn_t vmci_interrupt_bm(int irq, void *_dev)
        struct vmci_guest_device *dev = _dev;
 
        /* For MSI-X we can just assume it was meant for us. */
-       tasklet_schedule(&dev->bm_tasklet);
+       vmci_process_bitmap(dev);
 
        return IRQ_HANDLED;
 }
@@ -638,10 +633,6 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
        vmci_dev->iobase = iobase;
        vmci_dev->mmio_base = mmio_base;
 
-       tasklet_init(&vmci_dev->datagram_tasklet,
-                    vmci_dispatch_dgs, (unsigned long)vmci_dev);
-       tasklet_init(&vmci_dev->bm_tasklet,
-                    vmci_process_bitmap, (unsigned long)vmci_dev);
        init_waitqueue_head(&vmci_dev->inout_wq);
 
        if (mmio_base != NULL) {
@@ -808,8 +799,9 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
         * Request IRQ for legacy or MSI interrupts, or for first
         * MSI-X vector.
         */
-       error = request_irq(pci_irq_vector(pdev, 0), vmci_interrupt,
-                           IRQF_SHARED, KBUILD_MODNAME, vmci_dev);
+       error = request_threaded_irq(pci_irq_vector(pdev, 0), NULL,
+                                    vmci_interrupt, IRQF_SHARED,
+                                    KBUILD_MODNAME, vmci_dev);
        if (error) {
                dev_err(&pdev->dev, "Irq %u in use: %d\n",
                        pci_irq_vector(pdev, 0), error);
@@ -823,9 +815,9 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
         * between the vectors.
         */
        if (vmci_dev->exclusive_vectors) {
-               error = request_irq(pci_irq_vector(pdev, 1),
-                                   vmci_interrupt_bm, 0, KBUILD_MODNAME,
-                                   vmci_dev);
+               error = request_threaded_irq(pci_irq_vector(pdev, 1), NULL,
+                                            vmci_interrupt_bm, 0,
+                                            KBUILD_MODNAME, vmci_dev);
                if (error) {
                        dev_err(&pdev->dev,
                                "Failed to allocate irq %u: %d\n",
@@ -833,9 +825,11 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
                        goto err_free_irq;
                }
                if (caps_in_use & VMCI_CAPS_DMA_DATAGRAM) {
-                       error = request_irq(pci_irq_vector(pdev, 2),
-                                           vmci_interrupt_dma_datagram,
-                                           0, KBUILD_MODNAME, vmci_dev);
+                       error = request_threaded_irq(pci_irq_vector(pdev, 2),
+                                                    NULL,
+                                                   vmci_interrupt_dma_datagram,
+                                                    0, KBUILD_MODNAME,
+                                                    vmci_dev);
                        if (error) {
                                dev_err(&pdev->dev,
                                        "Failed to allocate irq %u: %d\n",
@@ -871,8 +865,6 @@ err_free_bm_irq:
 
 err_free_irq:
        free_irq(pci_irq_vector(pdev, 0), vmci_dev);
-       tasklet_kill(&vmci_dev->datagram_tasklet);
-       tasklet_kill(&vmci_dev->bm_tasklet);
 
 err_disable_msi:
        pci_free_irq_vectors(pdev);
@@ -943,9 +935,6 @@ static void vmci_guest_remove_device(struct pci_dev *pdev)
        free_irq(pci_irq_vector(pdev, 0), vmci_dev);
        pci_free_irq_vectors(pdev);
 
-       tasklet_kill(&vmci_dev->datagram_tasklet);
-       tasklet_kill(&vmci_dev->bm_tasklet);
-
        if (vmci_dev->notification_bitmap) {
                /*
                 * The device reset above cleared the bitmap state of the
index 89ef0c80ac371166cce69067ad71a21960973f1f..9e73c34b64017a51e25d701e52f352cf23186fa6 100644 (file)
 #define ESDHC_TUNING_START_TAP_DEFAULT 0x1
 #define ESDHC_TUNING_START_TAP_MASK    0x7f
 #define ESDHC_TUNING_CMD_CRC_CHECK_DISABLE     (1 << 7)
+#define ESDHC_TUNING_STEP_DEFAULT      0x1
 #define ESDHC_TUNING_STEP_MASK         0x00070000
 #define ESDHC_TUNING_STEP_SHIFT                16
 
@@ -1368,7 +1369,7 @@ static void sdhci_esdhc_imx_hwinit(struct sdhci_host *host)
        struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
        struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
        struct cqhci_host *cq_host = host->mmc->cqe_private;
-       int tmp;
+       u32 tmp;
 
        if (esdhc_is_usdhc(imx_data)) {
                /*
@@ -1423,17 +1424,24 @@ static void sdhci_esdhc_imx_hwinit(struct sdhci_host *host)
 
                if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING) {
                        tmp = readl(host->ioaddr + ESDHC_TUNING_CTRL);
-                       tmp |= ESDHC_STD_TUNING_EN |
-                               ESDHC_TUNING_START_TAP_DEFAULT;
-                       if (imx_data->boarddata.tuning_start_tap) {
-                               tmp &= ~ESDHC_TUNING_START_TAP_MASK;
+                       tmp |= ESDHC_STD_TUNING_EN;
+
+                       /*
+                        * ROM code or bootloader may config the start tap
+                        * and step, unmask them first.
+                        */
+                       tmp &= ~(ESDHC_TUNING_START_TAP_MASK | ESDHC_TUNING_STEP_MASK);
+                       if (imx_data->boarddata.tuning_start_tap)
                                tmp |= imx_data->boarddata.tuning_start_tap;
-                       }
+                       else
+                               tmp |= ESDHC_TUNING_START_TAP_DEFAULT;
 
                        if (imx_data->boarddata.tuning_step) {
-                               tmp &= ~ESDHC_TUNING_STEP_MASK;
                                tmp |= imx_data->boarddata.tuning_step
                                        << ESDHC_TUNING_STEP_SHIFT;
+                       } else {
+                               tmp |= ESDHC_TUNING_STEP_DEFAULT
+                                       << ESDHC_TUNING_STEP_SHIFT;
                        }
 
                        /* Disable the CMD CRC check for tuning, if not, need to
index b16e12e62e72223ca75a2ac8fa19ce1e0613280d..3db9f32d6a7b9fcd51672ed299e7456c0ac079d7 100644 (file)
@@ -1492,9 +1492,11 @@ static int sunxi_mmc_remove(struct platform_device *pdev)
        struct sunxi_mmc_host *host = mmc_priv(mmc);
 
        mmc_remove_host(mmc);
-       pm_runtime_force_suspend(&pdev->dev);
-       disable_irq(host->irq);
-       sunxi_mmc_disable(host);
+       pm_runtime_disable(&pdev->dev);
+       if (!pm_runtime_status_suspended(&pdev->dev)) {
+               disable_irq(host->irq);
+               sunxi_mmc_disable(host);
+       }
        dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);
        mmc_free_host(mmc);
 
index 02601bb33de4e4485ba043916b819a9c9d15fa79..6e5e11c37078fd7cf4ff04af127d6d147045deb1 100644 (file)
@@ -50,7 +50,7 @@ static int scpart_scan_partmap(struct mtd_info *master, loff_t partmap_offs,
        int cnt = 0;
        int res = 0;
        int res2;
-       loff_t offs;
+       uint32_t offs;
        size_t retlen;
        struct sc_part_desc *pdesc = NULL;
        struct sc_part_desc *tmpdesc;
index f601e7bd8627919466db9e306048acc89b347146..1c689dafca2ae9ef3ede9b84b5deab479376e50a 100644 (file)
@@ -91,7 +91,7 @@ static int mtd_parser_tplink_safeloader_parse(struct mtd_info *mtd,
        buf = mtd_parser_tplink_safeloader_read_table(mtd);
        if (!buf) {
                err = -ENOENT;
-               goto err_out;
+               goto err_free_parts;
        }
 
        for (idx = 0, offset = TPLINK_SAFELOADER_DATA_OFFSET;
@@ -118,6 +118,8 @@ static int mtd_parser_tplink_safeloader_parse(struct mtd_info *mtd,
 err_free:
        for (idx -= 1; idx >= 0; idx--)
                kfree(parts[idx].name);
+err_free_parts:
+       kfree(parts);
 err_out:
        return err;
 };
index d8703d7dfd0af3db75475907ec648eedaf2814eb..d67c926bca8ba1b4b1edae2b803d1315e0a00434 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/err.h>
 #include <linux/errno.h>
 #include <linux/module.h>
+#include <linux/delay.h>
 #include <linux/device.h>
 #include <linux/mutex.h>
 #include <linux/math64.h>
index 455b555275f129850ead61770379336a634007f5..c99ffe6c683a3853c67225f0828bfbadafe938db 100644 (file)
@@ -1549,6 +1549,7 @@ static void ad_port_selection_logic(struct port *port, bool *update_slave_arr)
                        slave_err(bond->dev, port->slave->dev,
                                  "Port %d did not find a suitable aggregator\n",
                                  port->actor_port_number);
+                       return;
                }
        }
        /* if all aggregator's ports are READY_N == TRUE, set ready=TRUE
index b4c65783960a5aa14de5d64aeea190f02a04be44..0363ce597661422b82a7d33ef001151b275f9ada 100644 (file)
@@ -2654,10 +2654,12 @@ static void bond_miimon_link_change(struct bonding *bond,
 
 static void bond_miimon_commit(struct bonding *bond)
 {
-       struct slave *slave, *primary;
+       struct slave *slave, *primary, *active;
        bool do_failover = false;
        struct list_head *iter;
 
+       ASSERT_RTNL();
+
        bond_for_each_slave(bond, slave, iter) {
                switch (slave->link_new_state) {
                case BOND_LINK_NOCHANGE:
@@ -2700,8 +2702,8 @@ static void bond_miimon_commit(struct bonding *bond)
 
                        bond_miimon_link_change(bond, slave, BOND_LINK_UP);
 
-                       if (!rcu_access_pointer(bond->curr_active_slave) || slave == primary ||
-                           slave->prio > rcu_dereference(bond->curr_active_slave)->prio)
+                       active = rtnl_dereference(bond->curr_active_slave);
+                       if (!active || slave == primary || slave->prio > active->prio)
                                do_failover = true;
 
                        continue;
index 47b54ecf2c6f6b502cd968968d0018c053aec26c..6178a96e389f26fe3199586bd558527ad8678626 100644 (file)
@@ -540,10 +540,10 @@ int ksz9477_fdb_del(struct ksz_device *dev, int port,
                ksz_read32(dev, REG_SW_ALU_VAL_D, &alu_table[3]);
 
                /* clear forwarding port */
-               alu_table[2] &= ~BIT(port);
+               alu_table[1] &= ~BIT(port);
 
                /* if there is no port to forward, clear table */
-               if ((alu_table[2] & ALU_V_PORT_MAP) == 0) {
+               if ((alu_table[1] & ALU_V_PORT_MAP) == 0) {
                        alu_table[0] = 0;
                        alu_table[1] = 0;
                        alu_table[2] = 0;
index 7a2445a34eb778a4b1fb6cab6d0bf77605f3bb65..e3181d5471dfe3bcafb9695a1bce8554fa737196 100644 (file)
@@ -2,7 +2,6 @@
 config NET_DSA_MV88E6XXX
        tristate "Marvell 88E6xxx Ethernet switch fabric support"
        depends on NET_DSA
-       depends on PTP_1588_CLOCK_OPTIONAL
        select IRQ_DOMAIN
        select NET_DSA_TAG_EDSA
        select NET_DSA_TAG_DSA
@@ -13,7 +12,8 @@ config NET_DSA_MV88E6XXX
 config NET_DSA_MV88E6XXX_PTP
        bool "PTP support for Marvell 88E6xxx"
        default n
-       depends on NET_DSA_MV88E6XXX && PTP_1588_CLOCK
+       depends on (NET_DSA_MV88E6XXX = y && PTP_1588_CLOCK = y) || \
+                  (NET_DSA_MV88E6XXX = m && PTP_1588_CLOCK)
        help
          Say Y to enable PTP hardware timestamping on Marvell 88E6xxx switch
          chips that support it.
index c5c3b4e92f28bf6f0a1a2660b4109f7a22b950b8..2f224b166bbb32dc583ed9f65a41cf72742afc04 100644 (file)
@@ -37,77 +37,104 @@ qca8k_split_addr(u32 regaddr, u16 *r1, u16 *r2, u16 *page)
 }
 
 static int
-qca8k_set_lo(struct qca8k_priv *priv, int phy_id, u32 regnum, u16 lo)
+qca8k_mii_write_lo(struct mii_bus *bus, int phy_id, u32 regnum, u32 val)
 {
-       u16 *cached_lo = &priv->mdio_cache.lo;
-       struct mii_bus *bus = priv->bus;
        int ret;
+       u16 lo;
 
-       if (lo == *cached_lo)
-               return 0;
-
+       lo = val & 0xffff;
        ret = bus->write(bus, phy_id, regnum, lo);
        if (ret < 0)
                dev_err_ratelimited(&bus->dev,
                                    "failed to write qca8k 32bit lo register\n");
 
-       *cached_lo = lo;
-       return 0;
+       return ret;
 }
 
 static int
-qca8k_set_hi(struct qca8k_priv *priv, int phy_id, u32 regnum, u16 hi)
+qca8k_mii_write_hi(struct mii_bus *bus, int phy_id, u32 regnum, u32 val)
 {
-       u16 *cached_hi = &priv->mdio_cache.hi;
-       struct mii_bus *bus = priv->bus;
        int ret;
+       u16 hi;
 
-       if (hi == *cached_hi)
-               return 0;
-
+       hi = (u16)(val >> 16);
        ret = bus->write(bus, phy_id, regnum, hi);
        if (ret < 0)
                dev_err_ratelimited(&bus->dev,
                                    "failed to write qca8k 32bit hi register\n");
 
-       *cached_hi = hi;
-       return 0;
+       return ret;
 }
 
 static int
-qca8k_mii_read32(struct mii_bus *bus, int phy_id, u32 regnum, u32 *val)
+qca8k_mii_read_lo(struct mii_bus *bus, int phy_id, u32 regnum, u32 *val)
 {
        int ret;
 
        ret = bus->read(bus, phy_id, regnum);
-       if (ret >= 0) {
-               *val = ret;
-               ret = bus->read(bus, phy_id, regnum + 1);
-               *val |= ret << 16;
-       }
+       if (ret < 0)
+               goto err;
 
-       if (ret < 0) {
-               dev_err_ratelimited(&bus->dev,
-                                   "failed to read qca8k 32bit register\n");
-               *val = 0;
-               return ret;
-       }
+       *val = ret & 0xffff;
+       return 0;
+
+err:
+       dev_err_ratelimited(&bus->dev,
+                           "failed to read qca8k 32bit lo register\n");
+       *val = 0;
+
+       return ret;
+}
 
+static int
+qca8k_mii_read_hi(struct mii_bus *bus, int phy_id, u32 regnum, u32 *val)
+{
+       int ret;
+
+       ret = bus->read(bus, phy_id, regnum);
+       if (ret < 0)
+               goto err;
+
+       *val = ret << 16;
        return 0;
+
+err:
+       dev_err_ratelimited(&bus->dev,
+                           "failed to read qca8k 32bit hi register\n");
+       *val = 0;
+
+       return ret;
 }
 
-static void
-qca8k_mii_write32(struct qca8k_priv *priv, int phy_id, u32 regnum, u32 val)
+static int
+qca8k_mii_read32(struct mii_bus *bus, int phy_id, u32 regnum, u32 *val)
 {
-       u16 lo, hi;
+       u32 hi, lo;
        int ret;
 
-       lo = val & 0xffff;
-       hi = (u16)(val >> 16);
+       *val = 0;
 
-       ret = qca8k_set_lo(priv, phy_id, regnum, lo);
-       if (ret >= 0)
-               ret = qca8k_set_hi(priv, phy_id, regnum + 1, hi);
+       ret = qca8k_mii_read_lo(bus, phy_id, regnum, &lo);
+       if (ret < 0)
+               goto err;
+
+       ret = qca8k_mii_read_hi(bus, phy_id, regnum + 1, &hi);
+       if (ret < 0)
+               goto err;
+
+       *val = lo | hi;
+
+err:
+       return ret;
+}
+
+static void
+qca8k_mii_write32(struct mii_bus *bus, int phy_id, u32 regnum, u32 val)
+{
+       if (qca8k_mii_write_lo(bus, phy_id, regnum, val) < 0)
+               return;
+
+       qca8k_mii_write_hi(bus, phy_id, regnum + 1, val);
 }
 
 static int
@@ -146,7 +173,16 @@ static void qca8k_rw_reg_ack_handler(struct dsa_switch *ds, struct sk_buff *skb)
 
        command = get_unaligned_le32(&mgmt_ethhdr->command);
        cmd = FIELD_GET(QCA_HDR_MGMT_CMD, command);
+
        len = FIELD_GET(QCA_HDR_MGMT_LENGTH, command);
+       /* Special case for len of 15 as this is the max value for len and needs to
+        * be increased before converting it from word to dword.
+        */
+       if (len == 15)
+               len++;
+
+       /* We can ignore odd value, we always round up them in the alloc function. */
+       len *= sizeof(u16);
 
        /* Make sure the seq match the requested packet */
        if (get_unaligned_le32(&mgmt_ethhdr->seq) == mgmt_eth_data->seq)
@@ -193,17 +229,33 @@ static struct sk_buff *qca8k_alloc_mdio_header(enum mdio_cmd cmd, u32 reg, u32 *
        if (!skb)
                return NULL;
 
-       /* Max value for len reg is 15 (0xf) but the switch actually return 16 byte
-        * Actually for some reason the steps are:
-        * 0: nothing
-        * 1-4: first 4 byte
-        * 5-6: first 12 byte
-        * 7-15: all 16 byte
+       /* Hdr mgmt length value is in step of word size.
+        * As an example to process 4 byte of data the correct length to set is 2.
+        * To process 8 byte 4, 12 byte 6, 16 byte 8...
+        *
+        * Odd values will always return the next size on the ack packet.
+        * (length of 3 (6 byte) will always return 8 bytes of data)
+        *
+        * This means that a value of 15 (0xf) actually means reading/writing 32 bytes
+        * of data.
+        *
+        * To correctly calculate the length we devide the requested len by word and
+        * round up.
+        * On the ack function we can skip the odd check as we already handle the
+        * case here.
         */
-       if (len == 16)
-               real_len = 15;
-       else
-               real_len = len;
+       real_len = DIV_ROUND_UP(len, sizeof(u16));
+
+       /* We check if the result len is odd and we round up another time to
+        * the next size. (length of 3 will be increased to 4 as switch will always
+        * return 8 bytes)
+        */
+       if (real_len % sizeof(u16) != 0)
+               real_len++;
+
+       /* Max reg value is 0xf(15) but switch will always return the next size (32 byte) */
+       if (real_len == 16)
+               real_len--;
 
        skb_reset_mac_header(skb);
        skb_set_network_header(skb, skb->len);
@@ -417,7 +469,7 @@ qca8k_regmap_write(void *ctx, uint32_t reg, uint32_t val)
        if (ret < 0)
                goto exit;
 
-       qca8k_mii_write32(priv, 0x10 | r2, r1, val);
+       qca8k_mii_write32(bus, 0x10 | r2, r1, val);
 
 exit:
        mutex_unlock(&bus->mdio_lock);
@@ -450,7 +502,7 @@ qca8k_regmap_update_bits(void *ctx, uint32_t reg, uint32_t mask, uint32_t write_
 
        val &= ~mask;
        val |= write_val;
-       qca8k_mii_write32(priv, 0x10 | r2, r1, val);
+       qca8k_mii_write32(bus, 0x10 | r2, r1, val);
 
 exit:
        mutex_unlock(&bus->mdio_lock);
@@ -688,9 +740,9 @@ qca8k_mdio_busy_wait(struct mii_bus *bus, u32 reg, u32 mask)
 
        qca8k_split_addr(reg, &r1, &r2, &page);
 
-       ret = read_poll_timeout(qca8k_mii_read32, ret1, !(val & mask), 0,
+       ret = read_poll_timeout(qca8k_mii_read_hi, ret1, !(val & mask), 0,
                                QCA8K_BUSY_WAIT_TIMEOUT * USEC_PER_MSEC, false,
-                               bus, 0x10 | r2, r1, &val);
+                               bus, 0x10 | r2, r1 + 1, &val);
 
        /* Check if qca8k_read has failed for a different reason
         * before returnting -ETIMEDOUT
@@ -725,14 +777,14 @@ qca8k_mdio_write(struct qca8k_priv *priv, int phy, int regnum, u16 data)
        if (ret)
                goto exit;
 
-       qca8k_mii_write32(priv, 0x10 | r2, r1, val);
+       qca8k_mii_write32(bus, 0x10 | r2, r1, val);
 
        ret = qca8k_mdio_busy_wait(bus, QCA8K_MDIO_MASTER_CTRL,
                                   QCA8K_MDIO_MASTER_BUSY);
 
 exit:
        /* even if the busy_wait timeouts try to clear the MASTER_EN */
-       qca8k_mii_write32(priv, 0x10 | r2, r1, 0);
+       qca8k_mii_write_hi(bus, 0x10 | r2, r1 + 1, 0);
 
        mutex_unlock(&bus->mdio_lock);
 
@@ -762,18 +814,18 @@ qca8k_mdio_read(struct qca8k_priv *priv, int phy, int regnum)
        if (ret)
                goto exit;
 
-       qca8k_mii_write32(priv, 0x10 | r2, r1, val);
+       qca8k_mii_write_hi(bus, 0x10 | r2, r1 + 1, val);
 
        ret = qca8k_mdio_busy_wait(bus, QCA8K_MDIO_MASTER_CTRL,
                                   QCA8K_MDIO_MASTER_BUSY);
        if (ret)
                goto exit;
 
-       ret = qca8k_mii_read32(bus, 0x10 | r2, r1, &val);
+       ret = qca8k_mii_read_lo(bus, 0x10 | r2, r1, &val);
 
 exit:
        /* even if the busy_wait timeouts try to clear the MASTER_EN */
-       qca8k_mii_write32(priv, 0x10 | r2, r1, 0);
+       qca8k_mii_write_hi(bus, 0x10 | r2, r1 + 1, 0);
 
        mutex_unlock(&bus->mdio_lock);
 
@@ -1943,8 +1995,6 @@ qca8k_sw_probe(struct mdio_device *mdiodev)
        }
 
        priv->mdio_cache.page = 0xffff;
-       priv->mdio_cache.lo = 0xffff;
-       priv->mdio_cache.hi = 0xffff;
 
        /* Check the detected switch id */
        ret = qca8k_read_switch_id(priv);
index 0b7a5cb12321670aba0575e5c5532340bd833c90..03514f7a20becce61d1628910abade1af7721adf 100644 (file)
@@ -375,11 +375,6 @@ struct qca8k_mdio_cache {
  * mdio writes
  */
        u16 page;
-/* lo and hi can also be cached and from Documentation we can skip one
- * extra mdio write if lo or hi is didn't change.
- */
-       u16 lo;
-       u16 hi;
 };
 
 struct qca8k_pcs {
index 8c8b4c88c7deade2d86ea2e003feb17d406ddb44..451c3a1b62553794e736e2d1eb7b981072def1c6 100644 (file)
@@ -2400,29 +2400,18 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
                return -EOPNOTSUPP;
        }
 
-       switch (func) {
-       case ENA_ADMIN_TOEPLITZ:
-               if (key) {
-                       if (key_len != sizeof(hash_key->key)) {
-                               netdev_err(ena_dev->net_device,
-                                          "key len (%u) doesn't equal the supported size (%zu)\n",
-                                          key_len, sizeof(hash_key->key));
-                               return -EINVAL;
-                       }
-                       memcpy(hash_key->key, key, key_len);
-                       rss->hash_init_val = init_val;
-                       hash_key->key_parts = key_len / sizeof(hash_key->key[0]);
+       if ((func == ENA_ADMIN_TOEPLITZ) && key) {
+               if (key_len != sizeof(hash_key->key)) {
+                       netdev_err(ena_dev->net_device,
+                                  "key len (%u) doesn't equal the supported size (%zu)\n",
+                                  key_len, sizeof(hash_key->key));
+                       return -EINVAL;
                }
-               break;
-       case ENA_ADMIN_CRC32:
-               rss->hash_init_val = init_val;
-               break;
-       default:
-               netdev_err(ena_dev->net_device, "Invalid hash function (%d)\n",
-                          func);
-               return -EINVAL;
+               memcpy(hash_key->key, key, key_len);
+               hash_key->key_parts = key_len / sizeof(hash_key->key[0]);
        }
 
+       rss->hash_init_val = init_val;
        old_func = rss->hash_func;
        rss->hash_func = func;
        rc = ena_com_set_hash_function(ena_dev);
index 48ae6d810f8f9fb23c5902069aab58f6209853cd..8da79eedc057c2f68dbbe5cd3ff6656934f915b7 100644 (file)
@@ -887,11 +887,7 @@ static int ena_set_tunable(struct net_device *netdev,
        switch (tuna->id) {
        case ETHTOOL_RX_COPYBREAK:
                len = *(u32 *)data;
-               if (len > adapter->netdev->mtu) {
-                       ret = -EINVAL;
-                       break;
-               }
-               adapter->rx_copybreak = len;
+               ret = ena_set_rx_copybreak(adapter, len);
                break;
        default:
                ret = -EINVAL;
index a95529a69cbb620599f4a01c4eb0e0a6abb2b71d..e8ad5ea31affec68303dc089fd0aeccb5fff00f3 100644 (file)
@@ -374,9 +374,9 @@ static int ena_xdp_xmit(struct net_device *dev, int n,
 
 static int ena_xdp_execute(struct ena_ring *rx_ring, struct xdp_buff *xdp)
 {
+       u32 verdict = ENA_XDP_PASS;
        struct bpf_prog *xdp_prog;
        struct ena_ring *xdp_ring;
-       u32 verdict = XDP_PASS;
        struct xdp_frame *xdpf;
        u64 *xdp_stat;
 
@@ -393,7 +393,7 @@ static int ena_xdp_execute(struct ena_ring *rx_ring, struct xdp_buff *xdp)
                if (unlikely(!xdpf)) {
                        trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
                        xdp_stat = &rx_ring->rx_stats.xdp_aborted;
-                       verdict = XDP_ABORTED;
+                       verdict = ENA_XDP_DROP;
                        break;
                }
 
@@ -409,29 +409,35 @@ static int ena_xdp_execute(struct ena_ring *rx_ring, struct xdp_buff *xdp)
 
                spin_unlock(&xdp_ring->xdp_tx_lock);
                xdp_stat = &rx_ring->rx_stats.xdp_tx;
+               verdict = ENA_XDP_TX;
                break;
        case XDP_REDIRECT:
                if (likely(!xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog))) {
                        xdp_stat = &rx_ring->rx_stats.xdp_redirect;
+                       verdict = ENA_XDP_REDIRECT;
                        break;
                }
                trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
                xdp_stat = &rx_ring->rx_stats.xdp_aborted;
-               verdict = XDP_ABORTED;
+               verdict = ENA_XDP_DROP;
                break;
        case XDP_ABORTED:
                trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
                xdp_stat = &rx_ring->rx_stats.xdp_aborted;
+               verdict = ENA_XDP_DROP;
                break;
        case XDP_DROP:
                xdp_stat = &rx_ring->rx_stats.xdp_drop;
+               verdict = ENA_XDP_DROP;
                break;
        case XDP_PASS:
                xdp_stat = &rx_ring->rx_stats.xdp_pass;
+               verdict = ENA_XDP_PASS;
                break;
        default:
                bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, verdict);
                xdp_stat = &rx_ring->rx_stats.xdp_invalid;
+               verdict = ENA_XDP_DROP;
        }
 
        ena_increase_stat(xdp_stat, 1, &rx_ring->syncp);
@@ -512,16 +518,18 @@ static void ena_xdp_exchange_program_rx_in_range(struct ena_adapter *adapter,
                                                 struct bpf_prog *prog,
                                                 int first, int count)
 {
+       struct bpf_prog *old_bpf_prog;
        struct ena_ring *rx_ring;
        int i = 0;
 
        for (i = first; i < count; i++) {
                rx_ring = &adapter->rx_ring[i];
-               xchg(&rx_ring->xdp_bpf_prog, prog);
-               if (prog) {
+               old_bpf_prog = xchg(&rx_ring->xdp_bpf_prog, prog);
+
+               if (!old_bpf_prog && prog) {
                        ena_xdp_register_rxq_info(rx_ring);
                        rx_ring->rx_headroom = XDP_PACKET_HEADROOM;
-               } else {
+               } else if (old_bpf_prog && !prog) {
                        ena_xdp_unregister_rxq_info(rx_ring);
                        rx_ring->rx_headroom = NET_SKB_PAD;
                }
@@ -672,6 +680,7 @@ static void ena_init_io_rings_common(struct ena_adapter *adapter,
        ring->ena_dev = adapter->ena_dev;
        ring->per_napi_packets = 0;
        ring->cpu = 0;
+       ring->numa_node = 0;
        ring->no_interrupt_event_cnt = 0;
        u64_stats_init(&ring->syncp);
 }
@@ -775,6 +784,7 @@ static int ena_setup_tx_resources(struct ena_adapter *adapter, int qid)
        tx_ring->next_to_use = 0;
        tx_ring->next_to_clean = 0;
        tx_ring->cpu = ena_irq->cpu;
+       tx_ring->numa_node = node;
        return 0;
 
 err_push_buf_intermediate_buf:
@@ -907,6 +917,7 @@ static int ena_setup_rx_resources(struct ena_adapter *adapter,
        rx_ring->next_to_clean = 0;
        rx_ring->next_to_use = 0;
        rx_ring->cpu = ena_irq->cpu;
+       rx_ring->numa_node = node;
 
        return 0;
 }
@@ -1619,12 +1630,12 @@ static int ena_xdp_handle_buff(struct ena_ring *rx_ring, struct xdp_buff *xdp)
         * we expect, then we simply drop it
         */
        if (unlikely(rx_ring->ena_bufs[0].len > ENA_XDP_MAX_MTU))
-               return XDP_DROP;
+               return ENA_XDP_DROP;
 
        ret = ena_xdp_execute(rx_ring, xdp);
 
        /* The xdp program might expand the headers */
-       if (ret == XDP_PASS) {
+       if (ret == ENA_XDP_PASS) {
                rx_info->page_offset = xdp->data - xdp->data_hard_start;
                rx_ring->ena_bufs[0].len = xdp->data_end - xdp->data;
        }
@@ -1663,7 +1674,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
        xdp_init_buff(&xdp, ENA_PAGE_SIZE, &rx_ring->xdp_rxq);
 
        do {
-               xdp_verdict = XDP_PASS;
+               xdp_verdict = ENA_XDP_PASS;
                skb = NULL;
                ena_rx_ctx.ena_bufs = rx_ring->ena_bufs;
                ena_rx_ctx.max_bufs = rx_ring->sgl_size;
@@ -1691,7 +1702,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
                        xdp_verdict = ena_xdp_handle_buff(rx_ring, &xdp);
 
                /* allocate skb and fill it */
-               if (xdp_verdict == XDP_PASS)
+               if (xdp_verdict == ENA_XDP_PASS)
                        skb = ena_rx_skb(rx_ring,
                                         rx_ring->ena_bufs,
                                         ena_rx_ctx.descs,
@@ -1709,14 +1720,15 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
                                /* Packets was passed for transmission, unmap it
                                 * from RX side.
                                 */
-                               if (xdp_verdict == XDP_TX || xdp_verdict == XDP_REDIRECT) {
+                               if (xdp_verdict & ENA_XDP_FORWARDED) {
                                        ena_unmap_rx_buff(rx_ring,
                                                          &rx_ring->rx_buffer_info[req_id]);
                                        rx_ring->rx_buffer_info[req_id].page = NULL;
                                }
                        }
-                       if (xdp_verdict != XDP_PASS) {
+                       if (xdp_verdict != ENA_XDP_PASS) {
                                xdp_flags |= xdp_verdict;
+                               total_len += ena_rx_ctx.ena_bufs[0].len;
                                res_budget--;
                                continue;
                        }
@@ -1760,7 +1772,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
                ena_refill_rx_bufs(rx_ring, refill_required);
        }
 
-       if (xdp_flags & XDP_REDIRECT)
+       if (xdp_flags & ENA_XDP_REDIRECT)
                xdp_do_flush_map();
 
        return work_done;
@@ -1814,8 +1826,9 @@ static void ena_adjust_adaptive_rx_intr_moderation(struct ena_napi *ena_napi)
 static void ena_unmask_interrupt(struct ena_ring *tx_ring,
                                        struct ena_ring *rx_ring)
 {
+       u32 rx_interval = tx_ring->smoothed_interval;
        struct ena_eth_io_intr_reg intr_reg;
-       u32 rx_interval = 0;
+
        /* Rx ring can be NULL when for XDP tx queues which don't have an
         * accompanying rx_ring pair.
         */
@@ -1853,20 +1866,27 @@ static void ena_update_ring_numa_node(struct ena_ring *tx_ring,
        if (likely(tx_ring->cpu == cpu))
                goto out;
 
+       tx_ring->cpu = cpu;
+       if (rx_ring)
+               rx_ring->cpu = cpu;
+
        numa_node = cpu_to_node(cpu);
+
+       if (likely(tx_ring->numa_node == numa_node))
+               goto out;
+
        put_cpu();
 
        if (numa_node != NUMA_NO_NODE) {
                ena_com_update_numa_node(tx_ring->ena_com_io_cq, numa_node);
-               if (rx_ring)
+               tx_ring->numa_node = numa_node;
+               if (rx_ring) {
+                       rx_ring->numa_node = numa_node;
                        ena_com_update_numa_node(rx_ring->ena_com_io_cq,
                                                 numa_node);
+               }
        }
 
-       tx_ring->cpu = cpu;
-       if (rx_ring)
-               rx_ring->cpu = cpu;
-
        return;
 out:
        put_cpu();
@@ -1987,11 +2007,10 @@ static int ena_io_poll(struct napi_struct *napi, int budget)
                        if (ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev))
                                ena_adjust_adaptive_rx_intr_moderation(ena_napi);
 
+                       ena_update_ring_numa_node(tx_ring, rx_ring);
                        ena_unmask_interrupt(tx_ring, rx_ring);
                }
 
-               ena_update_ring_numa_node(tx_ring, rx_ring);
-
                ret = rx_work_done;
        } else {
                ret = budget;
@@ -2376,7 +2395,7 @@ static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid)
        ctx.mem_queue_type = ena_dev->tx_mem_queue_type;
        ctx.msix_vector = msix_vector;
        ctx.queue_size = tx_ring->ring_size;
-       ctx.numa_node = cpu_to_node(tx_ring->cpu);
+       ctx.numa_node = tx_ring->numa_node;
 
        rc = ena_com_create_io_queue(ena_dev, &ctx);
        if (rc) {
@@ -2444,7 +2463,7 @@ static int ena_create_io_rx_queue(struct ena_adapter *adapter, int qid)
        ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
        ctx.msix_vector = msix_vector;
        ctx.queue_size = rx_ring->ring_size;
-       ctx.numa_node = cpu_to_node(rx_ring->cpu);
+       ctx.numa_node = rx_ring->numa_node;
 
        rc = ena_com_create_io_queue(ena_dev, &ctx);
        if (rc) {
@@ -2805,6 +2824,24 @@ int ena_update_queue_sizes(struct ena_adapter *adapter,
        return dev_was_up ? ena_up(adapter) : 0;
 }
 
+int ena_set_rx_copybreak(struct ena_adapter *adapter, u32 rx_copybreak)
+{
+       struct ena_ring *rx_ring;
+       int i;
+
+       if (rx_copybreak > min_t(u16, adapter->netdev->mtu, ENA_PAGE_SIZE))
+               return -EINVAL;
+
+       adapter->rx_copybreak = rx_copybreak;
+
+       for (i = 0; i < adapter->num_io_queues; i++) {
+               rx_ring = &adapter->rx_ring[i];
+               rx_ring->rx_copybreak = rx_copybreak;
+       }
+
+       return 0;
+}
+
 int ena_update_queue_count(struct ena_adapter *adapter, u32 new_channel_count)
 {
        struct ena_com_dev *ena_dev = adapter->ena_dev;
index 1bdce99bf6888998d43e04049fd28284a60edd47..2cb141079474c60767032c525dad8af07943d01c 100644 (file)
@@ -262,9 +262,11 @@ struct ena_ring {
        bool disable_meta_caching;
        u16 no_interrupt_event_cnt;
 
-       /* cpu for TPH */
+       /* cpu and NUMA for TPH */
        int cpu;
-        /* number of tx/rx_buffer_info's entries */
+       int numa_node;
+
+       /* number of tx/rx_buffer_info's entries */
        int ring_size;
 
        enum ena_admin_placement_policy_type tx_mem_queue_type;
@@ -392,6 +394,8 @@ int ena_update_queue_sizes(struct ena_adapter *adapter,
 
 int ena_update_queue_count(struct ena_adapter *adapter, u32 new_channel_count);
 
+int ena_set_rx_copybreak(struct ena_adapter *adapter, u32 rx_copybreak);
+
 int ena_get_sset_count(struct net_device *netdev, int sset);
 
 static inline void ena_reset_device(struct ena_adapter *adapter,
@@ -409,6 +413,15 @@ enum ena_xdp_errors_t {
        ENA_XDP_NO_ENOUGH_QUEUES,
 };
 
+enum ENA_XDP_ACTIONS {
+       ENA_XDP_PASS            = 0,
+       ENA_XDP_TX              = BIT(0),
+       ENA_XDP_REDIRECT        = BIT(1),
+       ENA_XDP_DROP            = BIT(2)
+};
+
+#define ENA_XDP_FORWARDED (ENA_XDP_TX | ENA_XDP_REDIRECT)
+
 static inline bool ena_xdp_present(struct ena_adapter *adapter)
 {
        return !!adapter->xdp_bpf_prog;
index 3936543a74d8f70ed5ecefaf6b4e18671cd72805..4030d619e84f56863d0fde61692ef12b386c4c32 100644 (file)
@@ -524,19 +524,28 @@ static void xgbe_disable_vxlan(struct xgbe_prv_data *pdata)
        netif_dbg(pdata, drv, pdata->netdev, "VXLAN acceleration disabled\n");
 }
 
+static unsigned int xgbe_get_fc_queue_count(struct xgbe_prv_data *pdata)
+{
+       unsigned int max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
+
+       /* From MAC ver 30H the TFCR is per priority, instead of per queue */
+       if (XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) >= 0x30)
+               return max_q_count;
+       else
+               return min_t(unsigned int, pdata->tx_q_count, max_q_count);
+}
+
 static int xgbe_disable_tx_flow_control(struct xgbe_prv_data *pdata)
 {
-       unsigned int max_q_count, q_count;
        unsigned int reg, reg_val;
-       unsigned int i;
+       unsigned int i, q_count;
 
        /* Clear MTL flow control */
        for (i = 0; i < pdata->rx_q_count; i++)
                XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 0);
 
        /* Clear MAC flow control */
-       max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
-       q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count);
+       q_count = xgbe_get_fc_queue_count(pdata);
        reg = MAC_Q0TFCR;
        for (i = 0; i < q_count; i++) {
                reg_val = XGMAC_IOREAD(pdata, reg);
@@ -553,9 +562,8 @@ static int xgbe_enable_tx_flow_control(struct xgbe_prv_data *pdata)
 {
        struct ieee_pfc *pfc = pdata->pfc;
        struct ieee_ets *ets = pdata->ets;
-       unsigned int max_q_count, q_count;
        unsigned int reg, reg_val;
-       unsigned int i;
+       unsigned int i, q_count;
 
        /* Set MTL flow control */
        for (i = 0; i < pdata->rx_q_count; i++) {
@@ -579,8 +587,7 @@ static int xgbe_enable_tx_flow_control(struct xgbe_prv_data *pdata)
        }
 
        /* Set MAC flow control */
-       max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
-       q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count);
+       q_count = xgbe_get_fc_queue_count(pdata);
        reg = MAC_Q0TFCR;
        for (i = 0; i < q_count; i++) {
                reg_val = XGMAC_IOREAD(pdata, reg);
index 7b666106feee935771b9e351f3b70b9d5d8d5cd3..614c0278419bcf31e8be085300b34acd00847df4 100644 (file)
@@ -1064,6 +1064,9 @@ static void xgbe_free_irqs(struct xgbe_prv_data *pdata)
 
        devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
 
+       tasklet_kill(&pdata->tasklet_dev);
+       tasklet_kill(&pdata->tasklet_ecc);
+
        if (pdata->vdata->ecc_support && (pdata->dev_irq != pdata->ecc_irq))
                devm_free_irq(pdata->dev, pdata->ecc_irq, pdata);
 
index 22d4fc547a0a3f659678403cc917c421b7130d00..a9ccc4258ee50de5e51fe1b4d2fd21946b2a9cea 100644 (file)
@@ -447,8 +447,10 @@ static void xgbe_i2c_stop(struct xgbe_prv_data *pdata)
        xgbe_i2c_disable(pdata);
        xgbe_i2c_clear_all_interrupts(pdata);
 
-       if (pdata->dev_irq != pdata->i2c_irq)
+       if (pdata->dev_irq != pdata->i2c_irq) {
                devm_free_irq(pdata->dev, pdata->i2c_irq, pdata);
+               tasklet_kill(&pdata->tasklet_i2c);
+       }
 }
 
 static int xgbe_i2c_start(struct xgbe_prv_data *pdata)
index 4e97b486952205fbba19d6bfc938341c7f36a495..43fdd111235a66ab17ce66b47ce694f6f7a49937 100644 (file)
@@ -496,6 +496,7 @@ static enum xgbe_an xgbe_an73_tx_training(struct xgbe_prv_data *pdata,
        reg |= XGBE_KR_TRAINING_ENABLE;
        reg |= XGBE_KR_TRAINING_START;
        XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, reg);
+       pdata->kr_start_time = jiffies;
 
        netif_dbg(pdata, link, pdata->netdev,
                  "KR training initiated\n");
@@ -632,6 +633,8 @@ static enum xgbe_an xgbe_an73_incompat_link(struct xgbe_prv_data *pdata)
 
        xgbe_switch_mode(pdata);
 
+       pdata->an_result = XGBE_AN_READY;
+
        xgbe_an_restart(pdata);
 
        return XGBE_AN_INCOMPAT_LINK;
@@ -1275,9 +1278,30 @@ static bool xgbe_phy_aneg_done(struct xgbe_prv_data *pdata)
 static void xgbe_check_link_timeout(struct xgbe_prv_data *pdata)
 {
        unsigned long link_timeout;
+       unsigned long kr_time;
+       int wait;
 
        link_timeout = pdata->link_check + (XGBE_LINK_TIMEOUT * HZ);
        if (time_after(jiffies, link_timeout)) {
+               if ((xgbe_cur_mode(pdata) == XGBE_MODE_KR) &&
+                   pdata->phy.autoneg == AUTONEG_ENABLE) {
+                       /* AN restart should not happen while KR training is in progress.
+                        * The while loop ensures no AN restart during KR training,
+                        * waits up to 500ms and AN restart is triggered only if KR
+                        * training is failed.
+                        */
+                       wait = XGBE_KR_TRAINING_WAIT_ITER;
+                       while (wait--) {
+                               kr_time = pdata->kr_start_time +
+                                         msecs_to_jiffies(XGBE_AN_MS_TIMEOUT);
+                               if (time_after(jiffies, kr_time))
+                                       break;
+                               /* AN restart is not required, if AN result is COMPLETE */
+                               if (pdata->an_result == XGBE_AN_COMPLETE)
+                                       return;
+                               usleep_range(10000, 11000);
+                       }
+               }
                netif_dbg(pdata, link, pdata->netdev, "AN link timeout\n");
                xgbe_phy_config_aneg(pdata);
        }
@@ -1390,8 +1414,10 @@ static void xgbe_phy_stop(struct xgbe_prv_data *pdata)
        /* Disable auto-negotiation */
        xgbe_an_disable_all(pdata);
 
-       if (pdata->dev_irq != pdata->an_irq)
+       if (pdata->dev_irq != pdata->an_irq) {
                devm_free_irq(pdata->dev, pdata->an_irq, pdata);
+               tasklet_kill(&pdata->tasklet_an);
+       }
 
        pdata->phy_if.phy_impl.stop(pdata);
 
index 71f24cb479355e8a40d9cdfbeea5f5736ddeb2a3..7a41367c437ddf3cc484d4650b4c0ccdb1496f37 100644 (file)
 /* Auto-negotiation */
 #define XGBE_AN_MS_TIMEOUT             500
 #define XGBE_LINK_TIMEOUT              5
+#define XGBE_KR_TRAINING_WAIT_ITER     50
 
 #define XGBE_SGMII_AN_LINK_STATUS      BIT(1)
 #define XGBE_SGMII_AN_LINK_SPEED       (BIT(2) | BIT(3))
@@ -1280,6 +1281,7 @@ struct xgbe_prv_data {
        unsigned int parallel_detect;
        unsigned int fec_ability;
        unsigned long an_start;
+       unsigned long kr_start_time;
        enum xgbe_an_mode an_mode;
 
        /* I2C support */
index d91fdb0c2649d895aa7076910482fb24a0e60e79..2cf96892e5650da9a94fddee0bea2d98ec8e6381 100644 (file)
@@ -2784,17 +2784,11 @@ static int bcm_enet_shared_probe(struct platform_device *pdev)
        return 0;
 }
 
-static int bcm_enet_shared_remove(struct platform_device *pdev)
-{
-       return 0;
-}
-
 /* this "shared" driver is needed because both macs share a single
  * address space
  */
 struct platform_driver bcm63xx_enet_shared_driver = {
        .probe  = bcm_enet_shared_probe,
-       .remove = bcm_enet_shared_remove,
        .driver = {
                .name   = "bcm63xx_enet_shared",
                .owner  = THIS_MODULE,
index 4c7d07c684c492cd74147b7a8798597bf6e4fb2e..240a7e8a76528ce3615870a5248a453cbd905719 100644 (file)
@@ -991,10 +991,9 @@ static struct sk_buff *bnxt_rx_multi_page_skb(struct bnxt *bp,
        dma_addr -= bp->rx_dma_offset;
        dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
                             DMA_ATTR_WEAK_ORDERING);
-       skb = build_skb(page_address(page), BNXT_PAGE_MODE_BUF_SIZE +
-                                           bp->rx_dma_offset);
+       skb = build_skb(page_address(page), PAGE_SIZE);
        if (!skb) {
-               __free_page(page);
+               page_pool_recycle_direct(rxr->page_pool, page);
                return NULL;
        }
        skb_mark_for_recycle(skb);
@@ -1032,7 +1031,7 @@ static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
 
        skb = napi_alloc_skb(&rxr->bnapi->napi, payload);
        if (!skb) {
-               __free_page(page);
+               page_pool_recycle_direct(rxr->page_pool, page);
                return NULL;
        }
 
@@ -1925,7 +1924,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
        dma_addr = rx_buf->mapping;
 
        if (bnxt_xdp_attached(bp, rxr)) {
-               bnxt_xdp_buff_init(bp, rxr, cons, &data_ptr, &len, &xdp);
+               bnxt_xdp_buff_init(bp, rxr, cons, data_ptr, len, &xdp);
                if (agg_bufs) {
                        u32 frag_len = bnxt_rx_agg_pages_xdp(bp, cpr, &xdp,
                                                             cp_cons, agg_bufs,
@@ -1940,7 +1939,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
        }
 
        if (xdp_active) {
-               if (bnxt_rx_xdp(bp, rxr, cons, xdp, data, &len, event)) {
+               if (bnxt_rx_xdp(bp, rxr, cons, xdp, data, &data_ptr, &len, event)) {
                        rc = 1;
                        goto next_rx;
                }
@@ -3969,8 +3968,10 @@ void bnxt_set_ring_params(struct bnxt *bp)
                bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
 
                if (BNXT_RX_PAGE_MODE(bp)) {
-                       rx_space = BNXT_PAGE_MODE_BUF_SIZE;
-                       rx_size = BNXT_MAX_PAGE_MODE_MTU;
+                       rx_space = PAGE_SIZE;
+                       rx_size = PAGE_SIZE -
+                                 ALIGN(max(NET_SKB_PAD, XDP_PACKET_HEADROOM), 8) -
+                                 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
                } else {
                        rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
                        rx_space = rx_size + NET_SKB_PAD +
@@ -5398,15 +5399,16 @@ static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
        req->flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT);
        req->enables = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID);
 
-       if (BNXT_RX_PAGE_MODE(bp) && !BNXT_RX_JUMBO_MODE(bp)) {
+       if (BNXT_RX_PAGE_MODE(bp)) {
+               req->jumbo_thresh = cpu_to_le16(bp->rx_buf_use_size);
+       } else {
                req->flags |= cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
                                          VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
                req->enables |=
                        cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
+               req->jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
+               req->hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
        }
-       /* thresholds not implemented in firmware yet */
-       req->jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
-       req->hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
        req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
        return hwrm_req_send(bp, req);
 }
@@ -13591,7 +13593,6 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                return -ENOMEM;
 
        bp = netdev_priv(dev);
-       SET_NETDEV_DEVLINK_PORT(dev, &bp->dl_port);
        bp->board_idx = ent->driver_data;
        bp->msg_enable = BNXT_DEF_MSG_ENABLE;
        bnxt_set_max_func_irqs(bp, max_irqs);
@@ -13599,6 +13600,10 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (bnxt_vf_pciid(bp->board_idx))
                bp->flags |= BNXT_FLAG_VF;
 
+       /* No devlink port registration in case of a VF */
+       if (BNXT_PF(bp))
+               SET_NETDEV_DEVLINK_PORT(dev, &bp->dl_port);
+
        if (pdev->msix_cap)
                bp->flags |= BNXT_FLAG_MSIX_CAP;
 
index 41c6dd0ae447e3de530b4bfb009c117133b62a00..5163ef4a49ea35e49e6a1cbe9fb50079a06a3aa8 100644 (file)
@@ -591,12 +591,20 @@ struct nqe_cn {
 #define BNXT_RX_PAGE_SIZE (1 << BNXT_RX_PAGE_SHIFT)
 
 #define BNXT_MAX_MTU           9500
-#define BNXT_PAGE_MODE_BUF_SIZE \
+
+/* First RX buffer page in XDP multi-buf mode
+ *
+ * +-------------------------------------------------------------------------+
+ * | XDP_PACKET_HEADROOM | bp->rx_buf_use_size              | skb_shared_info|
+ * | (bp->rx_dma_offset) |                                  |                |
+ * +-------------------------------------------------------------------------+
+ */
+#define BNXT_MAX_PAGE_MODE_MTU_SBUF \
        ((unsigned int)PAGE_SIZE - VLAN_ETH_HLEN - NET_IP_ALIGN -       \
         XDP_PACKET_HEADROOM)
 #define BNXT_MAX_PAGE_MODE_MTU \
-       BNXT_PAGE_MODE_BUF_SIZE - \
-       SKB_DATA_ALIGN((unsigned int)sizeof(struct skb_shared_info))
+       (BNXT_MAX_PAGE_MODE_MTU_SBUF - \
+        SKB_DATA_ALIGN((unsigned int)sizeof(struct skb_shared_info)))
 
 #define BNXT_MIN_PKT_SIZE      52
 
@@ -2134,7 +2142,6 @@ struct bnxt {
 #define BNXT_DUMP_CRASH                1
 
        struct bpf_prog         *xdp_prog;
-       u8                      xdp_has_frags;
 
        struct bnxt_ptp_cfg     *ptp_cfg;
        u8                      ptp_all_rx_tstamp;
index cbf17fcfb7ab431244613871fe8dbf5e7f8818dc..ec573127b70762fd6b26f5108b28a4318f045d92 100644 (file)
@@ -3969,7 +3969,7 @@ void bnxt_ethtool_init(struct bnxt *bp)
                test_info->timeout = HWRM_CMD_TIMEOUT;
        for (i = 0; i < bp->num_tests; i++) {
                char *str = test_info->string[i];
-               char *fw_str = resp->test0_name + i * 32;
+               char *fw_str = resp->test_name[i];
 
                if (i == BNXT_MACLPBK_TEST_IDX) {
                        strcpy(str, "Mac loopback test (offline)");
@@ -3980,14 +3980,9 @@ void bnxt_ethtool_init(struct bnxt *bp)
                } else if (i == BNXT_IRQ_TEST_IDX) {
                        strcpy(str, "Interrupt_test (offline)");
                } else {
-                       strscpy(str, fw_str, ETH_GSTRING_LEN);
-                       strncat(str, " test", ETH_GSTRING_LEN - strlen(str));
-                       if (test_info->offline_mask & (1 << i))
-                               strncat(str, " (offline)",
-                                       ETH_GSTRING_LEN - strlen(str));
-                       else
-                               strncat(str, " (online)",
-                                       ETH_GSTRING_LEN - strlen(str));
+                       snprintf(str, ETH_GSTRING_LEN, "%s test (%s)",
+                                fw_str, test_info->offline_mask & (1 << i) ?
+                                       "offline" : "online");
                }
        }
 
index 2686a714a59f7782ec4bef3112384b31e3fabc5b..a5408879e077e9663ce6deb10dbf4c08d90c961d 100644 (file)
@@ -10249,14 +10249,7 @@ struct hwrm_selftest_qlist_output {
        u8      unused_0;
        __le16  test_timeout;
        u8      unused_1[2];
-       char    test0_name[32];
-       char    test1_name[32];
-       char    test2_name[32];
-       char    test3_name[32];
-       char    test4_name[32];
-       char    test5_name[32];
-       char    test6_name[32];
-       char    test7_name[32];
+       char    test_name[8][32];
        u8      eyescope_target_BER_support;
        #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E8_SUPPORTED  0x0UL
        #define SELFTEST_QLIST_RESP_EYESCOPE_TARGET_BER_SUPPORT_BER_1E9_SUPPORTED  0x1UL
index c3065ec0a47981064c133ec9bff25bf44e3eefd0..36d5202c0aeec94c19c92f90a3968dcc1e63ba3e 100644 (file)
@@ -177,7 +177,7 @@ bool bnxt_xdp_attached(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
 }
 
 void bnxt_xdp_buff_init(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
-                       u16 cons, u8 **data_ptr, unsigned int *len,
+                       u16 cons, u8 *data_ptr, unsigned int len,
                        struct xdp_buff *xdp)
 {
        struct bnxt_sw_rx_bd *rx_buf;
@@ -191,13 +191,10 @@ void bnxt_xdp_buff_init(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
        offset = bp->rx_offset;
 
        mapping = rx_buf->mapping - bp->rx_dma_offset;
-       dma_sync_single_for_cpu(&pdev->dev, mapping + offset, *len, bp->rx_dir);
-
-       if (bp->xdp_has_frags)
-               buflen = BNXT_PAGE_MODE_BUF_SIZE + offset;
+       dma_sync_single_for_cpu(&pdev->dev, mapping + offset, len, bp->rx_dir);
 
        xdp_init_buff(xdp, buflen, &rxr->xdp_rxq);
-       xdp_prepare_buff(xdp, *data_ptr - offset, offset, *len, false);
+       xdp_prepare_buff(xdp, data_ptr - offset, offset, len, false);
 }
 
 void bnxt_xdp_buff_frags_free(struct bnxt_rx_ring_info *rxr,
@@ -222,7 +219,8 @@ void bnxt_xdp_buff_frags_free(struct bnxt_rx_ring_info *rxr,
  * false   - packet should be passed to the stack.
  */
 bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
-                struct xdp_buff xdp, struct page *page, unsigned int *len, u8 *event)
+                struct xdp_buff xdp, struct page *page, u8 **data_ptr,
+                unsigned int *len, u8 *event)
 {
        struct bpf_prog *xdp_prog = READ_ONCE(rxr->xdp_prog);
        struct bnxt_tx_ring_info *txr;
@@ -255,8 +253,10 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
                *event &= ~BNXT_RX_EVENT;
 
        *len = xdp.data_end - xdp.data;
-       if (orig_data != xdp.data)
+       if (orig_data != xdp.data) {
                offset = xdp.data - xdp.data_hard_start;
+               *data_ptr = xdp.data_hard_start + offset;
+       }
 
        switch (act) {
        case XDP_PASS:
@@ -401,10 +401,8 @@ static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog)
                netdev_warn(dev, "ethtool rx/tx channels must be combined to support XDP.\n");
                return -EOPNOTSUPP;
        }
-       if (prog) {
+       if (prog)
                tx_xdp = bp->rx_nr_rings;
-               bp->xdp_has_frags = prog->aux->xdp_has_frags;
-       }
 
        tc = netdev_get_num_tc(dev);
        if (!tc)
index 505911ae095d33b081d0748748f3908cf2fcd4a5..ea430d6961df32abba20ac867fa0acf44fec9233 100644 (file)
@@ -18,8 +18,8 @@ struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,
                                   struct xdp_buff *xdp);
 void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts);
 bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
-                struct xdp_buff xdp, struct page *page, unsigned int *len,
-                u8 *event);
+                struct xdp_buff xdp, struct page *page, u8 **data_ptr,
+                unsigned int *len, u8 *event);
 int bnxt_xdp(struct net_device *dev, struct netdev_bpf *xdp);
 int bnxt_xdp_xmit(struct net_device *dev, int num_frames,
                  struct xdp_frame **frames, u32 flags);
@@ -27,7 +27,7 @@ int bnxt_xdp_xmit(struct net_device *dev, int num_frames,
 bool bnxt_xdp_attached(struct bnxt *bp, struct bnxt_rx_ring_info *rxr);
 
 void bnxt_xdp_buff_init(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
-                       u16 cons, u8 **data_ptr, unsigned int *len,
+                       u16 cons, u8 *data_ptr, unsigned int len,
                        struct xdp_buff *xdp);
 void bnxt_xdp_buff_frags_free(struct bnxt_rx_ring_info *rxr,
                              struct xdp_buff *xdp);
index 95667b979fab4254e02636a9b677139cd9042834..72e42820713dffbbca0f6925d32da5c6e53f0e82 100644 (file)
@@ -2187,7 +2187,6 @@ static int macb_pad_and_fcs(struct sk_buff **skb, struct net_device *ndev)
        bool cloned = skb_cloned(*skb) || skb_header_cloned(*skb) ||
                      skb_is_nonlinear(*skb);
        int padlen = ETH_ZLEN - (*skb)->len;
-       int headroom = skb_headroom(*skb);
        int tailroom = skb_tailroom(*skb);
        struct sk_buff *nskb;
        u32 fcs;
@@ -2201,9 +2200,6 @@ static int macb_pad_and_fcs(struct sk_buff **skb, struct net_device *ndev)
                /* FCS could be appeded to tailroom. */
                if (tailroom >= ETH_FCS_LEN)
                        goto add_fcs;
-               /* FCS could be appeded by moving data to headroom. */
-               else if (!cloned && headroom + tailroom >= ETH_FCS_LEN)
-                       padlen = 0;
                /* No room for FCS, need to reallocate skb. */
                else
                        padlen = ETH_FCS_LEN;
@@ -2212,10 +2208,7 @@ static int macb_pad_and_fcs(struct sk_buff **skb, struct net_device *ndev)
                padlen += ETH_FCS_LEN;
        }
 
-       if (!cloned && headroom + tailroom >= padlen) {
-               (*skb)->data = memmove((*skb)->head, (*skb)->data, (*skb)->len);
-               skb_set_tail_pointer(*skb, (*skb)->len);
-       } else {
+       if (cloned || tailroom < padlen) {
                nskb = skb_copy_expand(*skb, 0, padlen, GFP_ATOMIC);
                if (!nskb)
                        return -ENOMEM;
index 3a79ead5219ae27d1ff0ccf5a6f62f9d8ed39128..e96449eedfb5428a6361a76dc98a8f8df51744c5 100644 (file)
@@ -2290,14 +2290,14 @@ static void enetc_tx_onestep_tstamp(struct work_struct *work)
 
        priv = container_of(work, struct enetc_ndev_priv, tx_onestep_tstamp);
 
-       netif_tx_lock(priv->ndev);
+       netif_tx_lock_bh(priv->ndev);
 
        clear_bit_unlock(ENETC_TX_ONESTEP_TSTAMP_IN_PROGRESS, &priv->flags);
        skb = skb_dequeue(&priv->tx_skbs);
        if (skb)
                enetc_start_xmit(skb, priv->ndev);
 
-       netif_tx_unlock(priv->ndev);
+       netif_tx_unlock_bh(priv->ndev);
 }
 
 static void enetc_tx_onestep_tstamp_init(struct enetc_ndev_priv *priv)
index 91f02c5050285ba63e84513dd980d0b144e900f6..b307bef4dc298d4bec1fbf9e31757fe5e806eac9 100644 (file)
@@ -127,11 +127,6 @@ static int enetc_ierb_probe(struct platform_device *pdev)
        return 0;
 }
 
-static int enetc_ierb_remove(struct platform_device *pdev)
-{
-       return 0;
-}
-
 static const struct of_device_id enetc_ierb_match[] = {
        { .compatible = "fsl,ls1028a-enetc-ierb", },
        {},
@@ -144,7 +139,6 @@ static struct platform_driver enetc_ierb_driver = {
                .of_match_table = enetc_ierb_match,
        },
        .probe = enetc_ierb_probe,
-       .remove = enetc_ierb_remove,
 };
 
 module_platform_driver(enetc_ierb_driver);
index d00bae15a9017d2e46dec8febc647f4721fce12d..d528ca681b6f4616de32dd9da7d0339509f8f707 100644 (file)
@@ -1430,7 +1430,7 @@ int dtsec_initialization(struct mac_device *mac_dev,
        dtsec->dtsec_drv_param->tx_pad_crc = true;
 
        phy_node = of_parse_phandle(mac_node, "tbi-handle", 0);
-       if (!phy_node || of_device_is_available(phy_node)) {
+       if (!phy_node || !of_device_is_available(phy_node)) {
                of_node_put(phy_node);
                err = -EINVAL;
                dev_err_probe(mac_dev->dev, err,
index 0ec5730b1788688c31056c12c828e9971797e5fb..b4c4fb873568c6c8f2a40719166ee21b69b63727 100644 (file)
@@ -3855,18 +3855,16 @@ static int hns3_gro_complete(struct sk_buff *skb, u32 l234info)
        return 0;
 }
 
-static bool hns3_checksum_complete(struct hns3_enet_ring *ring,
+static void hns3_checksum_complete(struct hns3_enet_ring *ring,
                                   struct sk_buff *skb, u32 ptype, u16 csum)
 {
        if (ptype == HNS3_INVALID_PTYPE ||
            hns3_rx_ptype_tbl[ptype].ip_summed != CHECKSUM_COMPLETE)
-               return false;
+               return;
 
        hns3_ring_stats_update(ring, csum_complete);
        skb->ip_summed = CHECKSUM_COMPLETE;
        skb->csum = csum_unfold((__force __sum16)csum);
-
-       return true;
 }
 
 static void hns3_rx_handle_csum(struct sk_buff *skb, u32 l234info,
@@ -3926,8 +3924,7 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
                ptype = hnae3_get_field(ol_info, HNS3_RXD_PTYPE_M,
                                        HNS3_RXD_PTYPE_S);
 
-       if (hns3_checksum_complete(ring, skb, ptype, csum))
-               return;
+       hns3_checksum_complete(ring, skb, ptype, csum);
 
        /* check if hardware has done checksum */
        if (!(bd_base_info & BIT(HNS3_RXD_L3L4P_B)))
@@ -3936,6 +3933,7 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
        if (unlikely(l234info & (BIT(HNS3_RXD_L3E_B) | BIT(HNS3_RXD_L4E_B) |
                                 BIT(HNS3_RXD_OL3E_B) |
                                 BIT(HNS3_RXD_OL4E_B)))) {
+               skb->ip_summed = CHECKSUM_NONE;
                hns3_ring_stats_update(ring, l3l4_csum_err);
 
                return;
index 4e54f91f7a6c1575a5a518c2232dcf394ce8c6f8..07ad5f35219e26563957ea52089a2aba0d9fcf88 100644 (file)
@@ -3910,9 +3910,17 @@ static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
                        return ret;
                }
 
-               if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
+               if (!reset ||
+                   !test_bit(HCLGE_VPORT_STATE_INITED, &vport->state))
                        continue;
 
+               if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state) &&
+                   hdev->reset_type == HNAE3_FUNC_RESET) {
+                       set_bit(HCLGE_VPORT_NEED_NOTIFY_RESET,
+                               &vport->need_notify);
+                       continue;
+               }
+
                /* Inform VF to process the reset.
                 * hclge_inform_reset_assert_to_vf may fail if VF
                 * driver is not loaded.
@@ -4609,18 +4617,25 @@ static void hclge_reset_service_task(struct hclge_dev *hdev)
 
 static void hclge_update_vport_alive(struct hclge_dev *hdev)
 {
+#define HCLGE_ALIVE_SECONDS_NORMAL             8
+
+       unsigned long alive_time = HCLGE_ALIVE_SECONDS_NORMAL * HZ;
        int i;
 
        /* start from vport 1 for PF is always alive */
        for (i = 1; i < hdev->num_alloc_vport; i++) {
                struct hclge_vport *vport = &hdev->vport[i];
 
-               if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
+               if (!test_bit(HCLGE_VPORT_STATE_INITED, &vport->state) ||
+                   !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
+                       continue;
+               if (time_after(jiffies, vport->last_active_jiffies +
+                              alive_time)) {
                        clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
-
-               /* If vf is not alive, set to default value */
-               if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
-                       vport->mps = HCLGE_MAC_DEFAULT_FRAME;
+                       dev_warn(&hdev->pdev->dev,
+                                "VF %u heartbeat timeout\n",
+                                i - HCLGE_VF_VPORT_START_NUM);
+               }
        }
 }
 
@@ -8064,9 +8079,11 @@ int hclge_vport_start(struct hclge_vport *vport)
 {
        struct hclge_dev *hdev = vport->back;
 
+       set_bit(HCLGE_VPORT_STATE_INITED, &vport->state);
        set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
        set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
        vport->last_active_jiffies = jiffies;
+       vport->need_notify = 0;
 
        if (test_bit(vport->vport_id, hdev->vport_config_block)) {
                if (vport->vport_id) {
@@ -8084,7 +8101,9 @@ int hclge_vport_start(struct hclge_vport *vport)
 
 void hclge_vport_stop(struct hclge_vport *vport)
 {
+       clear_bit(HCLGE_VPORT_STATE_INITED, &vport->state);
        clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
+       vport->need_notify = 0;
 }
 
 static int hclge_client_start(struct hnae3_handle *handle)
@@ -9208,7 +9227,8 @@ static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
                return 0;
        }
 
-       dev_info(&hdev->pdev->dev, "MAC of VF %d has been set to %s\n",
+       dev_info(&hdev->pdev->dev,
+                "MAC of VF %d has been set to %s, will be active after VF reset\n",
                 vf, format_mac_addr);
        return 0;
 }
@@ -10465,12 +10485,16 @@ static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
         * for DEVICE_VERSION_V3, vf doesn't need to know about the port based
         * VLAN state.
         */
-       if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3 &&
-           test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
-               (void)hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
-                                                       vport->vport_id,
-                                                       state, &vlan_info);
-
+       if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) {
+               if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
+                       (void)hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
+                                                               vport->vport_id,
+                                                               state,
+                                                               &vlan_info);
+               else
+                       set_bit(HCLGE_VPORT_NEED_NOTIFY_VF_VLAN,
+                               &vport->need_notify);
+       }
        return 0;
 }
 
@@ -11941,7 +11965,7 @@ static void hclge_reset_vport_state(struct hclge_dev *hdev)
        int i;
 
        for (i = 0; i < hdev->num_alloc_vport; i++) {
-               hclge_vport_stop(vport);
+               clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
                vport++;
        }
 }
@@ -12754,60 +12778,71 @@ static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
        return ret;
 }
 
-static void hclge_sync_promisc_mode(struct hclge_dev *hdev)
+static int hclge_sync_vport_promisc_mode(struct hclge_vport *vport)
 {
-       struct hclge_vport *vport = &hdev->vport[0];
        struct hnae3_handle *handle = &vport->nic;
+       struct hclge_dev *hdev = vport->back;
+       bool uc_en = false;
+       bool mc_en = false;
        u8 tmp_flags;
+       bool bc_en;
        int ret;
-       u16 i;
 
        if (vport->last_promisc_flags != vport->overflow_promisc_flags) {
                set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
                vport->last_promisc_flags = vport->overflow_promisc_flags;
        }
 
-       if (test_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state)) {
+       if (!test_and_clear_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
+                               &vport->state))
+               return 0;
+
+       /* for PF */
+       if (!vport->vport_id) {
                tmp_flags = handle->netdev_flags | vport->last_promisc_flags;
                ret = hclge_set_promisc_mode(handle, tmp_flags & HNAE3_UPE,
                                             tmp_flags & HNAE3_MPE);
-               if (!ret) {
-                       clear_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
-                                 &vport->state);
+               if (!ret)
                        set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
                                &vport->state);
-               }
+               else
+                       set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
+                               &vport->state);
+               return ret;
        }
 
-       for (i = 1; i < hdev->num_alloc_vport; i++) {
-               bool uc_en = false;
-               bool mc_en = false;
-               bool bc_en;
+       /* for VF */
+       if (vport->vf_info.trusted) {
+               uc_en = vport->vf_info.request_uc_en > 0 ||
+                       vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE;
+               mc_en = vport->vf_info.request_mc_en > 0 ||
+                       vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE;
+       }
+       bc_en = vport->vf_info.request_bc_en > 0;
 
-               vport = &hdev->vport[i];
+       ret = hclge_cmd_set_promisc_mode(hdev, vport->vport_id, uc_en,
+                                        mc_en, bc_en);
+       if (ret) {
+               set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE, &vport->state);
+               return ret;
+       }
+       hclge_set_vport_vlan_fltr_change(vport);
 
-               if (!test_and_clear_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
-                                       &vport->state))
-                       continue;
+       return 0;
+}
 
-               if (vport->vf_info.trusted) {
-                       uc_en = vport->vf_info.request_uc_en > 0 ||
-                               vport->overflow_promisc_flags &
-                               HNAE3_OVERFLOW_UPE;
-                       mc_en = vport->vf_info.request_mc_en > 0 ||
-                               vport->overflow_promisc_flags &
-                               HNAE3_OVERFLOW_MPE;
-               }
-               bc_en = vport->vf_info.request_bc_en > 0;
+static void hclge_sync_promisc_mode(struct hclge_dev *hdev)
+{
+       struct hclge_vport *vport;
+       int ret;
+       u16 i;
 
-               ret = hclge_cmd_set_promisc_mode(hdev, vport->vport_id, uc_en,
-                                                mc_en, bc_en);
-               if (ret) {
-                       set_bit(HCLGE_VPORT_STATE_PROMISC_CHANGE,
-                               &vport->state);
+       for (i = 0; i < hdev->num_alloc_vport; i++) {
+               vport = &hdev->vport[i];
+
+               ret = hclge_sync_vport_promisc_mode(vport);
+               if (ret)
                        return;
-               }
-               hclge_set_vport_vlan_fltr_change(vport);
        }
 }
 
@@ -12944,6 +12979,11 @@ static void hclge_clear_vport_vf_info(struct hclge_vport *vport, int vfid)
        struct hclge_vlan_info vlan_info;
        int ret;
 
+       clear_bit(HCLGE_VPORT_STATE_INITED, &vport->state);
+       clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
+       vport->need_notify = 0;
+       vport->mps = 0;
+
        /* after disable sriov, clean VF rate configured by PF */
        ret = hclge_tm_qs_shaper_cfg(vport, 0);
        if (ret)
index 495b639b0dc249d81d68cb13376fe3f96f6c8564..13f23d606e77bdeb06692a73dad69b3a92b3c6f4 100644 (file)
@@ -995,9 +995,15 @@ enum HCLGE_VPORT_STATE {
        HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
        HCLGE_VPORT_STATE_PROMISC_CHANGE,
        HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE,
+       HCLGE_VPORT_STATE_INITED,
        HCLGE_VPORT_STATE_MAX
 };
 
+enum HCLGE_VPORT_NEED_NOTIFY {
+       HCLGE_VPORT_NEED_NOTIFY_RESET,
+       HCLGE_VPORT_NEED_NOTIFY_VF_VLAN,
+};
+
 struct hclge_vlan_info {
        u16 vlan_proto; /* so far support 802.1Q only */
        u16 qos;
@@ -1044,6 +1050,7 @@ struct hclge_vport {
        struct hnae3_handle roce;
 
        unsigned long state;
+       unsigned long need_notify;
        unsigned long last_active_jiffies;
        u32 mps; /* Max packet size */
        struct hclge_vf_info vf_info;
index a7b06c63143cc2082207cfdde8038b4a620a4260..04ff9bf121853ab7a0f876c55b49d1e6d9199035 100644 (file)
@@ -124,17 +124,26 @@ static int hclge_send_mbx_msg(struct hclge_vport *vport, u8 *msg, u16 msg_len,
        return status;
 }
 
+static int hclge_inform_vf_reset(struct hclge_vport *vport, u16 reset_type)
+{
+       __le16 msg_data;
+       u8 dest_vfid;
+
+       dest_vfid = (u8)vport->vport_id;
+       msg_data = cpu_to_le16(reset_type);
+
+       /* send this requested info to VF */
+       return hclge_send_mbx_msg(vport, (u8 *)&msg_data, sizeof(msg_data),
+                                 HCLGE_MBX_ASSERTING_RESET, dest_vfid);
+}
+
 int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport)
 {
        struct hclge_dev *hdev = vport->back;
-       __le16 msg_data;
        u16 reset_type;
-       u8 dest_vfid;
 
        BUILD_BUG_ON(HNAE3_MAX_RESET > U16_MAX);
 
-       dest_vfid = (u8)vport->vport_id;
-
        if (hdev->reset_type == HNAE3_FUNC_RESET)
                reset_type = HNAE3_VF_PF_FUNC_RESET;
        else if (hdev->reset_type == HNAE3_FLR_RESET)
@@ -142,11 +151,7 @@ int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport)
        else
                reset_type = HNAE3_VF_FUNC_RESET;
 
-       msg_data = cpu_to_le16(reset_type);
-
-       /* send this requested info to VF */
-       return hclge_send_mbx_msg(vport, (u8 *)&msg_data, sizeof(msg_data),
-                                 HCLGE_MBX_ASSERTING_RESET, dest_vfid);
+       return hclge_inform_vf_reset(vport, reset_type);
 }
 
 static void hclge_free_vector_ring_chain(struct hnae3_ring_chain_node *head)
@@ -652,9 +657,56 @@ static int hclge_reset_vf(struct hclge_vport *vport)
        return hclge_func_reset_cmd(hdev, vport->vport_id);
 }
 
+static void hclge_notify_vf_config(struct hclge_vport *vport)
+{
+       struct hclge_dev *hdev = vport->back;
+       struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
+       struct hclge_port_base_vlan_config *vlan_cfg;
+       int ret;
+
+       hclge_push_vf_link_status(vport);
+       if (test_bit(HCLGE_VPORT_NEED_NOTIFY_RESET, &vport->need_notify)) {
+               ret = hclge_inform_vf_reset(vport, HNAE3_VF_PF_FUNC_RESET);
+               if (ret) {
+                       dev_err(&hdev->pdev->dev,
+                               "failed to inform VF %u reset!",
+                               vport->vport_id - HCLGE_VF_VPORT_START_NUM);
+                       return;
+               }
+               vport->need_notify = 0;
+               return;
+       }
+
+       if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3 &&
+           test_bit(HCLGE_VPORT_NEED_NOTIFY_VF_VLAN, &vport->need_notify)) {
+               vlan_cfg = &vport->port_base_vlan_cfg;
+               ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
+                                                       vport->vport_id,
+                                                       vlan_cfg->state,
+                                                       &vlan_cfg->vlan_info);
+               if (ret) {
+                       dev_err(&hdev->pdev->dev,
+                               "failed to inform VF %u port base vlan!",
+                               vport->vport_id - HCLGE_VF_VPORT_START_NUM);
+                       return;
+               }
+               clear_bit(HCLGE_VPORT_NEED_NOTIFY_VF_VLAN, &vport->need_notify);
+       }
+}
+
 static void hclge_vf_keep_alive(struct hclge_vport *vport)
 {
+       struct hclge_dev *hdev = vport->back;
+
        vport->last_active_jiffies = jiffies;
+
+       if (test_bit(HCLGE_VPORT_STATE_INITED, &vport->state) &&
+           !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
+               set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
+               dev_info(&hdev->pdev->dev, "VF %u is alive!",
+                        vport->vport_id - HCLGE_VF_VPORT_START_NUM);
+               hclge_notify_vf_config(vport);
+       }
 }
 
 static int hclge_set_vf_mtu(struct hclge_vport *vport,
@@ -954,6 +1006,7 @@ static int hclge_mbx_vf_uninit_handler(struct hclge_mbx_ops_param *param)
        hclge_rm_vport_all_mac_table(param->vport, true,
                                     HCLGE_MAC_ADDR_MC);
        hclge_rm_vport_all_vlan_table(param->vport, true);
+       param->vport->mps = 0;
        return 0;
 }
 
index db6f7cdba9587ebc2121aa3ff48b58dcdf0abe5f..e84e5be8e59ed88f4539e149743536a65cf136ea 100644 (file)
@@ -2767,7 +2767,8 @@ static int hclgevf_pci_reset(struct hclgevf_dev *hdev)
        struct pci_dev *pdev = hdev->pdev;
        int ret = 0;
 
-       if (hdev->reset_type == HNAE3_VF_FULL_RESET &&
+       if ((hdev->reset_type == HNAE3_VF_FULL_RESET ||
+            hdev->reset_type == HNAE3_FLR_RESET) &&
            test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
                hclgevf_misc_irq_uninit(hdev);
                hclgevf_uninit_msi(hdev);
@@ -3129,7 +3130,7 @@ static int hclgevf_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
 
        hclgevf_update_rss_size(handle, new_tqps_num);
 
-       hclge_comm_get_rss_tc_info(cur_rss_size, hdev->hw_tc_map,
+       hclge_comm_get_rss_tc_info(kinfo->rss_size, hdev->hw_tc_map,
                                   tc_offset, tc_valid, tc_size);
        ret = hclge_comm_set_rss_tc_mode(&hdev->hw.hw, tc_offset,
                                         tc_valid, tc_size);
index c4e451ef79422a4cd2bf509e22b7b177cc329b33..adc02adef83a2ad180eb9b7fe1390b6ada1c6e07 100644 (file)
@@ -3850,7 +3850,7 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
                                field_flags |= IAVF_CLOUD_FIELD_IIP;
                        } else {
                                dev_err(&adapter->pdev->dev, "Bad ip src mask 0x%08x\n",
-                                       be32_to_cpu(match.mask->dst));
+                                       be32_to_cpu(match.mask->src));
                                return -EINVAL;
                        }
                }
index b5a7f246d230fdc8aad9b5fe6e6c95acdeadfd53..43e199b5b513b2ad32ec382e5563e46e23bc8428 100644 (file)
@@ -363,6 +363,7 @@ ice_gnss_tty_write(struct tty_struct *tty, const unsigned char *buf, int count)
        /* Send the data out to a hardware port */
        write_buf = kzalloc(sizeof(*write_buf), GFP_KERNEL);
        if (!write_buf) {
+               kfree(cmd_buf);
                err = -ENOMEM;
                goto exit;
        }
@@ -460,6 +461,9 @@ static struct tty_driver *ice_gnss_create_tty_driver(struct ice_pf *pf)
        for (i = 0; i < ICE_GNSS_TTY_MINOR_DEVICES; i++) {
                pf->gnss_tty_port[i] = kzalloc(sizeof(*pf->gnss_tty_port[i]),
                                               GFP_KERNEL);
+               if (!pf->gnss_tty_port[i])
+                       goto err_out;
+
                pf->gnss_serial[i] = NULL;
 
                tty_port_init(pf->gnss_tty_port[i]);
@@ -469,21 +473,23 @@ static struct tty_driver *ice_gnss_create_tty_driver(struct ice_pf *pf)
        err = tty_register_driver(tty_driver);
        if (err) {
                dev_err(dev, "Failed to register TTY driver err=%d\n", err);
-
-               for (i = 0; i < ICE_GNSS_TTY_MINOR_DEVICES; i++) {
-                       tty_port_destroy(pf->gnss_tty_port[i]);
-                       kfree(pf->gnss_tty_port[i]);
-               }
-               kfree(ttydrv_name);
-               tty_driver_kref_put(pf->ice_gnss_tty_driver);
-
-               return NULL;
+               goto err_out;
        }
 
        for (i = 0; i < ICE_GNSS_TTY_MINOR_DEVICES; i++)
                dev_info(dev, "%s%d registered\n", ttydrv_name, i);
 
        return tty_driver;
+
+err_out:
+       while (i--) {
+               tty_port_destroy(pf->gnss_tty_port[i]);
+               kfree(pf->gnss_tty_port[i]);
+       }
+       kfree(ttydrv_name);
+       tty_driver_kref_put(pf->ice_gnss_tty_driver);
+
+       return NULL;
 }
 
 /**
index 907055b77af0e0be7ac65f19128fcf8672fc8533..7105de6fb3444bbfe7a1f5a269807a89f7896880 100644 (file)
@@ -783,7 +783,7 @@ construct_skb:
 static void
 ice_clean_xdp_tx_buf(struct ice_tx_ring *xdp_ring, struct ice_tx_buf *tx_buf)
 {
-       xdp_return_frame((struct xdp_frame *)tx_buf->raw_buf);
+       page_frag_free(tx_buf->raw_buf);
        xdp_ring->xdp_tx_active--;
        dma_unmap_single(xdp_ring->dev, dma_unmap_addr(tx_buf, dma),
                         dma_unmap_len(tx_buf, len), DMA_TO_DEVICE);
index a7b22639cfcd9b6b5c94865b5029d0bf66fcd6af..e9747ec5ac0b83b0b982b60cb1f44d9ba4d57962 100644 (file)
 #define IGC_TSAUXC_EN_TT0      BIT(0)  /* Enable target time 0. */
 #define IGC_TSAUXC_EN_TT1      BIT(1)  /* Enable target time 1. */
 #define IGC_TSAUXC_EN_CLK0     BIT(2)  /* Enable Configurable Frequency Clock 0. */
+#define IGC_TSAUXC_ST0         BIT(4)  /* Start Clock 0 Toggle on Target Time 0. */
 #define IGC_TSAUXC_EN_CLK1     BIT(5)  /* Enable Configurable Frequency Clock 1. */
+#define IGC_TSAUXC_ST1         BIT(7)  /* Start Clock 1 Toggle on Target Time 1. */
 #define IGC_TSAUXC_EN_TS0      BIT(8)  /* Enable hardware timestamp 0. */
 #define IGC_TSAUXC_AUTT0       BIT(9)  /* Auxiliary Timestamp Taken. */
 #define IGC_TSAUXC_EN_TS1      BIT(10) /* Enable hardware timestamp 0. */
index 8dbb9f903ca70fc5864b4911249ed63e1c8aaa1f..c34734d432e0d726b4e15f7cdd14c80f8eb4630e 100644 (file)
@@ -322,7 +322,7 @@ static int igc_ptp_feature_enable_i225(struct ptp_clock_info *ptp,
                ts = ns_to_timespec64(ns);
                if (rq->perout.index == 1) {
                        if (use_freq) {
-                               tsauxc_mask = IGC_TSAUXC_EN_CLK1;
+                               tsauxc_mask = IGC_TSAUXC_EN_CLK1 | IGC_TSAUXC_ST1;
                                tsim_mask = 0;
                        } else {
                                tsauxc_mask = IGC_TSAUXC_EN_TT1;
@@ -333,7 +333,7 @@ static int igc_ptp_feature_enable_i225(struct ptp_clock_info *ptp,
                        freqout = IGC_FREQOUT1;
                } else {
                        if (use_freq) {
-                               tsauxc_mask = IGC_TSAUXC_EN_CLK0;
+                               tsauxc_mask = IGC_TSAUXC_EN_CLK0 | IGC_TSAUXC_ST0;
                                tsim_mask = 0;
                        } else {
                                tsauxc_mask = IGC_TSAUXC_EN_TT0;
@@ -347,10 +347,12 @@ static int igc_ptp_feature_enable_i225(struct ptp_clock_info *ptp,
                tsauxc = rd32(IGC_TSAUXC);
                tsim = rd32(IGC_TSIM);
                if (rq->perout.index == 1) {
-                       tsauxc &= ~(IGC_TSAUXC_EN_TT1 | IGC_TSAUXC_EN_CLK1);
+                       tsauxc &= ~(IGC_TSAUXC_EN_TT1 | IGC_TSAUXC_EN_CLK1 |
+                                   IGC_TSAUXC_ST1);
                        tsim &= ~IGC_TSICR_TT1;
                } else {
-                       tsauxc &= ~(IGC_TSAUXC_EN_TT0 | IGC_TSAUXC_EN_CLK0);
+                       tsauxc &= ~(IGC_TSAUXC_EN_TT0 | IGC_TSAUXC_EN_CLK0 |
+                                   IGC_TSAUXC_ST0);
                        tsim &= ~IGC_TSICR_TT0;
                }
                if (on) {
index 24aa97f993ca188d6ee60a80a3692fc772978cc1..123dca9ce4683f2aad71d38959a6d5170c26e954 100644 (file)
@@ -855,9 +855,11 @@ static struct pci_dev *ixgbe_get_first_secondary_devfn(unsigned int devfn)
        rp_pdev = pci_get_domain_bus_and_slot(0, 0, devfn);
        if (rp_pdev && rp_pdev->subordinate) {
                bus = rp_pdev->subordinate->number;
+               pci_dev_put(rp_pdev);
                return pci_get_domain_bus_and_slot(0, bus, 0);
        }
 
+       pci_dev_put(rp_pdev);
        return NULL;
 }
 
@@ -874,6 +876,7 @@ static bool ixgbe_x550em_a_has_mii(struct ixgbe_hw *hw)
        struct ixgbe_adapter *adapter = hw->back;
        struct pci_dev *pdev = adapter->pdev;
        struct pci_dev *func0_pdev;
+       bool has_mii = false;
 
        /* For the C3000 family of SoCs (x550em_a) the internal ixgbe devices
         * are always downstream of root ports @ 0000:00:16.0 & 0000:00:17.0
@@ -884,15 +887,16 @@ static bool ixgbe_x550em_a_has_mii(struct ixgbe_hw *hw)
        func0_pdev = ixgbe_get_first_secondary_devfn(PCI_DEVFN(0x16, 0));
        if (func0_pdev) {
                if (func0_pdev == pdev)
-                       return true;
-               else
-                       return false;
+                       has_mii = true;
+               goto out;
        }
        func0_pdev = ixgbe_get_first_secondary_devfn(PCI_DEVFN(0x17, 0));
        if (func0_pdev == pdev)
-               return true;
+               has_mii = true;
 
-       return false;
+out:
+       pci_dev_put(func0_pdev);
+       return has_mii;
 }
 
 /**
index b2b71fe80d61c2239e2231729f1f64a2fe8d285d..724df6398bbe28e6bcff5435006bb472fd9a3bb9 100644 (file)
@@ -774,9 +774,9 @@ int cgx_lmac_rx_tx_enable(void *cgxd, int lmac_id, bool enable)
 
        cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG);
        if (enable)
-               cfg |= CMR_EN | DATA_PKT_RX_EN | DATA_PKT_TX_EN;
+               cfg |= DATA_PKT_RX_EN | DATA_PKT_TX_EN;
        else
-               cfg &= ~(CMR_EN | DATA_PKT_RX_EN | DATA_PKT_TX_EN);
+               cfg &= ~(DATA_PKT_RX_EN | DATA_PKT_TX_EN);
        cgx_write(cgx, lmac_id, CGXX_CMRX_CFG, cfg);
        return 0;
 }
index fb2d37676d84e21717b497509604c9f91505990a..5a20d93004c7181a37a2c862f57edc8ad10a6957 100644 (file)
@@ -26,7 +26,6 @@
 #define CMR_P2X_SEL_SHIFT              59ULL
 #define CMR_P2X_SEL_NIX0               1ULL
 #define CMR_P2X_SEL_NIX1               2ULL
-#define CMR_EN                         BIT_ULL(55)
 #define DATA_PKT_TX_EN                 BIT_ULL(53)
 #define DATA_PKT_RX_EN                 BIT_ULL(54)
 #define CGX_LMAC_TYPE_SHIFT            40
index fa8029a940689eb605c24dbe1dcbfaeeed58d0b4..eb25e458266ca7b7934e41795fdf98b49723d023 100644 (file)
@@ -589,7 +589,7 @@ int rvu_mbox_handler_mcs_free_resources(struct rvu *rvu,
        u16 pcifunc = req->hdr.pcifunc;
        struct mcs_rsrc_map *map;
        struct mcs *mcs;
-       int rc;
+       int rc = 0;
 
        if (req->mcs_id >= rvu->mcs_blk_cnt)
                return MCS_AF_ERR_INVALID_MCSID;
index 9e10e7471b8874582749d4dfd2bb6d2347eada80..8a41ad8ca04f115de066a1bbc44f80939bdf73f4 100644 (file)
@@ -1376,18 +1376,23 @@ int otx2_sq_aura_pool_init(struct otx2_nic *pfvf)
                sq = &qset->sq[qidx];
                sq->sqb_count = 0;
                sq->sqb_ptrs = kcalloc(num_sqbs, sizeof(*sq->sqb_ptrs), GFP_KERNEL);
-               if (!sq->sqb_ptrs)
-                       return -ENOMEM;
+               if (!sq->sqb_ptrs) {
+                       err = -ENOMEM;
+                       goto err_mem;
+               }
 
                for (ptr = 0; ptr < num_sqbs; ptr++) {
-                       if (otx2_alloc_rbuf(pfvf, pool, &bufptr))
-                               return -ENOMEM;
+                       err = otx2_alloc_rbuf(pfvf, pool, &bufptr);
+                       if (err)
+                               goto err_mem;
                        pfvf->hw_ops->aura_freeptr(pfvf, pool_id, bufptr);
                        sq->sqb_ptrs[sq->sqb_count++] = (u64)bufptr;
                }
        }
 
-       return 0;
+err_mem:
+       return err ? -ENOMEM : 0;
+
 fail:
        otx2_mbox_reset(&pfvf->mbox.mbox, 0);
        otx2_aura_pool_free(pfvf);
@@ -1430,13 +1435,13 @@ int otx2_rq_aura_pool_init(struct otx2_nic *pfvf)
        for (pool_id = 0; pool_id < hw->rqpool_cnt; pool_id++) {
                pool = &pfvf->qset.pool[pool_id];
                for (ptr = 0; ptr < num_ptrs; ptr++) {
-                       if (otx2_alloc_rbuf(pfvf, pool, &bufptr))
+                       err = otx2_alloc_rbuf(pfvf, pool, &bufptr);
+                       if (err)
                                return -ENOMEM;
                        pfvf->hw_ops->aura_freeptr(pfvf, pool_id,
                                                   bufptr + OTX2_HEAD_ROOM);
                }
        }
-
        return 0;
 fail:
        otx2_mbox_reset(&pfvf->mbox.mbox, 0);
index 5bee3c3a7ce43fef7224fd4265922ff5a8b7a400..3d22cc6a2804a8d43324245a8b65ab554ec6b540 100644 (file)
@@ -736,8 +736,10 @@ static inline void cn10k_aura_freeptr(void *dev, int aura, u64 buf)
        u64 ptrs[2];
 
        ptrs[1] = buf;
+       get_cpu();
        /* Free only one buffer at time during init and teardown */
        __cn10k_aura_freeptr(pfvf, aura, ptrs, 2);
+       put_cpu();
 }
 
 /* Alloc pointer from pool/aura */
index 86653bb8e403a0af2d6bb9ef7bb80a9deca723ee..7f8ffbf79cf742905b2d6e5cfbd426a18b8f7bd8 100644 (file)
@@ -758,6 +758,8 @@ static void otx2vf_remove(struct pci_dev *pdev)
        if (vf->otx2_wq)
                destroy_workqueue(vf->otx2_wq);
        otx2_ptp_destroy(vf);
+       otx2_mcam_flow_del(vf);
+       otx2_shutdown_tc(vf);
        otx2vf_disable_mbox_intr(vf);
        otx2_detach_resources(&vf->mbox);
        if (test_bit(CN10K_LMTST, &vf->hw.cap_flag))
index d3ca745d107d62ca9e6cac4bb2d9ef729ed7dded..c837103a9ee33870d2a3d6a9e903d4133b51d657 100644 (file)
@@ -2176,15 +2176,9 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
                return -EINVAL;
        }
 
-       cmd->stats = kvcalloc(MLX5_CMD_OP_MAX, sizeof(*cmd->stats), GFP_KERNEL);
-       if (!cmd->stats)
-               return -ENOMEM;
-
        cmd->pool = dma_pool_create("mlx5_cmd", mlx5_core_dma_dev(dev), size, align, 0);
-       if (!cmd->pool) {
-               err = -ENOMEM;
-               goto dma_pool_err;
-       }
+       if (!cmd->pool)
+               return -ENOMEM;
 
        err = alloc_cmd_page(dev, cmd);
        if (err)
@@ -2268,8 +2262,6 @@ err_free_page:
 
 err_free_pool:
        dma_pool_destroy(cmd->pool);
-dma_pool_err:
-       kvfree(cmd->stats);
        return err;
 }
 
@@ -2282,7 +2274,6 @@ void mlx5_cmd_cleanup(struct mlx5_core_dev *dev)
        destroy_msg_cache(dev);
        free_cmd_page(dev, cmd);
        dma_pool_destroy(cmd->pool);
-       kvfree(cmd->stats);
 }
 
 void mlx5_cmd_set_state(struct mlx5_core_dev *dev,
index ddb197970c22c5c8994db59f964f439f41e39ffa..5bd83c0275f8218af3a2f182376706cfbff08c7c 100644 (file)
@@ -468,7 +468,7 @@ static int mlx5_devlink_enable_roce_validate(struct devlink *devlink, u32 id,
        bool new_state = val.vbool;
 
        if (new_state && !MLX5_CAP_GEN(dev, roce) &&
-           !MLX5_CAP_GEN(dev, roce_rw_supported)) {
+           !(MLX5_CAP_GEN(dev, roce_rw_supported) && MLX5_CAP_GEN_MAX(dev, roce))) {
                NL_SET_ERR_MSG_MOD(extack, "Device doesn't support RoCE");
                return -EOPNOTSUPP;
        }
@@ -563,7 +563,7 @@ static int mlx5_devlink_eq_depth_validate(struct devlink *devlink, u32 id,
                                          union devlink_param_value val,
                                          struct netlink_ext_ack *extack)
 {
-       return (val.vu16 >= 64 && val.vu16 <= 4096) ? 0 : -EINVAL;
+       return (val.vu32 >= 64 && val.vu32 <= 4096) ? 0 : -EINVAL;
 }
 
 static const struct devlink_param mlx5_devlink_params[] = {
index 6dac76fa58a3f158fdd5fc08cddeb94685c160c7..09d441ecb9f6d204e236f7e340e174a0842349bb 100644 (file)
@@ -637,7 +637,7 @@ mlx5e_htb_update_children(struct mlx5e_htb *htb, struct mlx5e_qos_node *node,
                if (child->bw_share == old_bw_share)
                        continue;
 
-               err_one = mlx5_qos_update_node(htb->mdev, child->hw_id, child->bw_share,
+               err_one = mlx5_qos_update_node(htb->mdev, child->bw_share,
                                               child->max_average_bw, child->hw_id);
                if (!err && err_one) {
                        err = err_one;
@@ -671,7 +671,7 @@ mlx5e_htb_node_modify(struct mlx5e_htb *htb, u16 classid, u64 rate, u64 ceil,
        mlx5e_htb_convert_rate(htb, rate, node->parent, &bw_share);
        mlx5e_htb_convert_ceil(htb, ceil, &max_average_bw);
 
-       err = mlx5_qos_update_node(htb->mdev, node->parent->hw_id, bw_share,
+       err = mlx5_qos_update_node(htb->mdev, bw_share,
                                   max_average_bw, node->hw_id);
        if (err) {
                NL_SET_ERR_MSG_MOD(extack, "Firmware error when modifying a node.");
index 585bdc8383ee21fb6371757b66a6ee1bf3edd425..4ad19c98129440a00e1aef412c1145f66a5fd487 100644 (file)
@@ -578,7 +578,6 @@ int mlx5e_mpwrq_validate_xsk(struct mlx5_core_dev *mdev, struct mlx5e_params *pa
 {
        enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
        u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
-       bool unaligned = xsk ? xsk->unaligned : false;
        u16 max_mtu_pkts;
 
        if (!mlx5e_check_fragmented_striding_rq_cap(mdev, page_shift, umr_mode))
@@ -591,7 +590,7 @@ int mlx5e_mpwrq_validate_xsk(struct mlx5_core_dev *mdev, struct mlx5e_params *pa
         * needed number of WQEs exceeds the maximum.
         */
        max_mtu_pkts = min_t(u8, MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE,
-                            mlx5e_mpwrq_max_log_rq_pkts(mdev, page_shift, unaligned));
+                            mlx5e_mpwrq_max_log_rq_pkts(mdev, page_shift, xsk->unaligned));
        if (params->log_rq_mtu_frames > max_mtu_pkts) {
                mlx5_core_err(mdev, "Current RQ length %d is too big for XSK with given frame size %u\n",
                              1 << params->log_rq_mtu_frames, xsk->chunk_size);
index 5f6f95ad6888c433ae333a9fb006fbf6f9393a3f..1ae15b8536a8576a793a1bc148f5f9377dcdda66 100644 (file)
@@ -459,7 +459,11 @@ static int mlx5e_rx_reporter_diagnose(struct devlink_health_reporter *reporter,
                goto unlock;
 
        for (i = 0; i < priv->channels.num; i++) {
-               struct mlx5e_rq *rq = &priv->channels.c[i]->rq;
+               struct mlx5e_channel *c = priv->channels.c[i];
+               struct mlx5e_rq *rq;
+
+               rq = test_bit(MLX5E_CHANNEL_STATE_XSK, c->state) ?
+                       &c->xskrq : &c->rq;
 
                err = mlx5e_rx_reporter_build_diagnose_output(rq, fmsg);
                if (err)
index 512d431489228eba07e4e333632f28ff2807ba6e..c4378afdec09e43c02813e3a2f88ccc93170e56e 100644 (file)
@@ -34,12 +34,6 @@ static int police_act_validate(const struct flow_action_entry *act,
                return -EOPNOTSUPP;
        }
 
-       if (act->police.rate_pkt_ps) {
-               NL_SET_ERR_MSG_MOD(extack,
-                                  "QoS offload not support packets per second");
-               return -EOPNOTSUPP;
-       }
-
        return 0;
 }
 
index 8d7d761482d272bae10a3b75c080955ef564ba5d..50b60fd0094676120d98239a6827625e101e7c76 100644 (file)
@@ -127,6 +127,7 @@ mlx5e_post_meter_add_rule(struct mlx5e_priv *priv,
                attr->counter = act_counter;
 
        attr->flags |= MLX5_ATTR_FLAG_NO_IN_PORT;
+       attr->inner_match_level = MLX5_MATCH_NONE;
        attr->outer_match_level = MLX5_MATCH_NONE;
        attr->chain = 0;
        attr->prio = 0;
index 1cbd2eb9d04f90ada8e6574ec409bdeaf96f24a4..f2c2c752bd1c345849d68245013feee7c3a4a2f9 100644 (file)
@@ -477,7 +477,6 @@ mlx5e_tc_sample_offload(struct mlx5e_tc_psample *tc_psample,
        struct mlx5e_sample_flow *sample_flow;
        struct mlx5e_sample_attr *sample_attr;
        struct mlx5_flow_attr *pre_attr;
-       u32 tunnel_id = attr->tunnel_id;
        struct mlx5_eswitch *esw;
        u32 default_tbl_id;
        u32 obj_id;
@@ -522,7 +521,7 @@ mlx5e_tc_sample_offload(struct mlx5e_tc_psample *tc_psample,
        restore_obj.sample.group_id = sample_attr->group_num;
        restore_obj.sample.rate = sample_attr->rate;
        restore_obj.sample.trunc_size = sample_attr->trunc_size;
-       restore_obj.sample.tunnel_id = tunnel_id;
+       restore_obj.sample.tunnel_id = attr->tunnel_id;
        err = mapping_add(esw->offloads.reg_c0_obj_pool, &restore_obj, &obj_id);
        if (err)
                goto err_obj_id;
@@ -548,7 +547,7 @@ mlx5e_tc_sample_offload(struct mlx5e_tc_psample *tc_psample,
        /* For decap action, do decap in the original flow table instead of the
         * default flow table.
         */
-       if (tunnel_id)
+       if (attr->action & MLX5_FLOW_CONTEXT_ACTION_DECAP)
                pre_attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
        pre_attr->modify_hdr = sample_flow->restore->modify_hdr;
        pre_attr->flags = MLX5_ATTR_FLAG_SAMPLE;
index a69849e0deed7e8ed5a31501172266727be53ba8..313df8232db707fea7b2bc49084e0e20230d391e 100644 (file)
@@ -2103,14 +2103,9 @@ out_err:
 static void
 mlx5_ct_tc_create_dbgfs(struct mlx5_tc_ct_priv *ct_priv)
 {
-       bool is_fdb = ct_priv->ns_type == MLX5_FLOW_NAMESPACE_FDB;
        struct mlx5_tc_ct_debugfs *ct_dbgfs = &ct_priv->debugfs;
-       char dirname[16] = {};
 
-       if (sscanf(dirname, "ct_%s", is_fdb ? "fdb" : "nic") < 0)
-               return;
-
-       ct_dbgfs->root = debugfs_create_dir(dirname, mlx5_debugfs_get_dev_root(ct_priv->dev));
+       ct_dbgfs->root = debugfs_create_dir("ct", mlx5_debugfs_get_dev_root(ct_priv->dev));
        debugfs_create_atomic_t("offloaded", 0400, ct_dbgfs->root,
                                &ct_dbgfs->stats.offloaded);
        debugfs_create_atomic_t("rx_dropped", 0400, ct_dbgfs->root,
index ff73d25bc6eb8b1108e1e4a81ce87b8c8e7f6222..2aaf8ab857b8f29fb819d83c9054f57d76403c42 100644 (file)
@@ -222,7 +222,7 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
        int err;
 
        list_for_each_entry(flow, flow_list, tmp_list) {
-               if (!mlx5e_is_offloaded_flow(flow) || flow_flag_test(flow, SLOW))
+               if (!mlx5e_is_offloaded_flow(flow))
                        continue;
 
                attr = mlx5e_tc_get_encap_attr(flow);
@@ -231,6 +231,13 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
                esw_attr->dests[flow->tmp_entry_index].flags &= ~MLX5_ESW_DEST_ENCAP_VALID;
                esw_attr->dests[flow->tmp_entry_index].pkt_reformat = NULL;
 
+               /* Clear pkt_reformat before checking slow path flag. Because
+                * in next iteration, the same flow is already set slow path
+                * flag, but still need to clear the pkt_reformat.
+                */
+               if (flow_flag_test(flow, SLOW))
+                       continue;
+
                /* update from encap rule to slow path rule */
                spec = &flow->attr->parse_attr->spec;
                rule = mlx5e_tc_offload_to_slow_path(esw, flow, spec);
index f5b26f5a7de46054ae57d4605c3513210bee5336..054d80c4e65cfc15fe445d4d8cce626d984b2548 100644 (file)
@@ -273,6 +273,11 @@ static int mlx5e_tc_tun_parse_geneve_options(struct mlx5e_priv *priv,
                 geneve_tlv_option_0_data, be32_to_cpu(opt_data_key));
        MLX5_SET(fte_match_set_misc3, misc_3_c,
                 geneve_tlv_option_0_data, be32_to_cpu(opt_data_mask));
+       if (MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev,
+                                      ft_field_support.geneve_tlv_option_0_exist)) {
+               MLX5_SET_TO_ONES(fte_match_set_misc, misc_c, geneve_tlv_option_0_exist);
+               MLX5_SET_TO_ONES(fte_match_set_misc, misc_v, geneve_tlv_option_0_exist);
+       }
 
        spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_3;
 
index fd07c4cbfd1d253bfb19fe9ac99feb099fd7b288..1f62c702b6255da0a5d980cfcea1c8f07fbfd0c5 100644 (file)
@@ -88,6 +88,8 @@ static int mlx5e_gen_ip_tunnel_header_vxlan(char buf[],
        struct udphdr *udp = (struct udphdr *)(buf);
        struct vxlanhdr *vxh;
 
+       if (tun_key->tun_flags & TUNNEL_VXLAN_OPT)
+               return -EOPNOTSUPP;
        vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr));
        *ip_proto = IPPROTO_UDP;
 
index a92e19c4c499d405aee95ad39d4853aa374b83bd..8bed9c361075471c5f90b1069d15b9cd3cb6dc53 100644 (file)
@@ -122,11 +122,8 @@ struct mlx5e_ipsec_aso {
        u8 ctx[MLX5_ST_SZ_BYTES(ipsec_aso)];
        dma_addr_t dma_addr;
        struct mlx5_aso *aso;
-       /* IPsec ASO caches data on every query call,
-        * so in nested calls, we can use this boolean to save
-        * recursive calls to mlx5e_ipsec_aso_query()
-        */
-       u8 use_cache : 1;
+       /* Protect ASO WQ access, as it is global to whole IPsec */
+       spinlock_t lock;
 };
 
 struct mlx5e_ipsec {
index 8e3614218fc4fabc7bac9c60a0874c4234f12581..2461462b7b991c96bebf8b253079246adddfcc0d 100644 (file)
@@ -320,7 +320,6 @@ static void mlx5e_ipsec_handle_event(struct work_struct *_work)
        if (ret)
                goto unlock;
 
-       aso->use_cache = true;
        if (attrs->esn_trigger &&
            !MLX5_GET(ipsec_aso, aso->ctx, esn_event_arm)) {
                u32 mode_param = MLX5_GET(ipsec_aso, aso->ctx, mode_parameter);
@@ -333,7 +332,6 @@ static void mlx5e_ipsec_handle_event(struct work_struct *_work)
                    !MLX5_GET(ipsec_aso, aso->ctx, hard_lft_arm) ||
                    !MLX5_GET(ipsec_aso, aso->ctx, remove_flow_enable))
                        xfrm_state_check_expire(sa_entry->x);
-       aso->use_cache = false;
 
 unlock:
        spin_unlock(&sa_entry->x->lock);
@@ -398,6 +396,7 @@ int mlx5e_ipsec_aso_init(struct mlx5e_ipsec *ipsec)
                goto err_aso_create;
        }
 
+       spin_lock_init(&aso->lock);
        ipsec->nb.notifier_call = mlx5e_ipsec_event;
        mlx5_notifier_register(mdev, &ipsec->nb);
 
@@ -456,13 +455,12 @@ int mlx5e_ipsec_aso_query(struct mlx5e_ipsec_sa_entry *sa_entry,
        struct mlx5e_hw_objs *res;
        struct mlx5_aso_wqe *wqe;
        u8 ds_cnt;
+       int ret;
 
        lockdep_assert_held(&sa_entry->x->lock);
-       if (aso->use_cache)
-               return 0;
-
        res = &mdev->mlx5e_res.hw_objs;
 
+       spin_lock_bh(&aso->lock);
        memset(aso->ctx, 0, sizeof(aso->ctx));
        wqe = mlx5_aso_get_wqe(aso->aso);
        ds_cnt = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS);
@@ -477,7 +475,9 @@ int mlx5e_ipsec_aso_query(struct mlx5e_ipsec_sa_entry *sa_entry,
        mlx5e_ipsec_aso_copy(ctrl, data);
 
        mlx5_aso_post_wqe(aso->aso, false, &wqe->ctrl);
-       return mlx5_aso_poll_cq(aso->aso, false);
+       ret = mlx5_aso_poll_cq(aso->aso, false);
+       spin_unlock_bh(&aso->lock);
+       return ret;
 }
 
 void mlx5e_ipsec_aso_update_curlft(struct mlx5e_ipsec_sa_entry *sa_entry,
index 9369a580743e1ffd9ce1955b64b4c87f7113234b..7f6b940830b314378630fc93940f24791314159a 100644 (file)
@@ -62,6 +62,7 @@ struct mlx5e_macsec_sa {
        u32 enc_key_id;
        u32 next_pn;
        sci_t sci;
+       ssci_t ssci;
        salt_t salt;
 
        struct rhash_head hash;
@@ -358,7 +359,6 @@ static int mlx5e_macsec_init_sa(struct macsec_context *ctx,
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5_macsec_obj_attrs obj_attrs;
        union mlx5e_macsec_rule *macsec_rule;
-       struct macsec_key *key;
        int err;
 
        obj_attrs.next_pn = sa->next_pn;
@@ -368,13 +368,9 @@ static int mlx5e_macsec_init_sa(struct macsec_context *ctx,
        obj_attrs.aso_pdn = macsec->aso.pdn;
        obj_attrs.epn_state = sa->epn_state;
 
-       key = (is_tx) ? &ctx->sa.tx_sa->key : &ctx->sa.rx_sa->key;
-
        if (sa->epn_state.epn_enabled) {
-               obj_attrs.ssci = (is_tx) ? cpu_to_be32((__force u32)ctx->sa.tx_sa->ssci) :
-                                          cpu_to_be32((__force u32)ctx->sa.rx_sa->ssci);
-
-               memcpy(&obj_attrs.salt, &key->salt, sizeof(key->salt));
+               obj_attrs.ssci = cpu_to_be32((__force u32)sa->ssci);
+               memcpy(&obj_attrs.salt, &sa->salt, sizeof(sa->salt));
        }
 
        obj_attrs.replay_window = ctx->secy->replay_window;
@@ -499,10 +495,11 @@ mlx5e_macsec_get_macsec_device_context(const struct mlx5e_macsec *macsec,
 }
 
 static void update_macsec_epn(struct mlx5e_macsec_sa *sa, const struct macsec_key *key,
-                             const pn_t *next_pn_halves)
+                             const pn_t *next_pn_halves, ssci_t ssci)
 {
        struct mlx5e_macsec_epn_state *epn_state = &sa->epn_state;
 
+       sa->ssci = ssci;
        sa->salt = key->salt;
        epn_state->epn_enabled = 1;
        epn_state->epn_msb = next_pn_halves->upper;
@@ -550,7 +547,8 @@ static int mlx5e_macsec_add_txsa(struct macsec_context *ctx)
        tx_sa->assoc_num = assoc_num;
 
        if (secy->xpn)
-               update_macsec_epn(tx_sa, &ctx_tx_sa->key, &ctx_tx_sa->next_pn_halves);
+               update_macsec_epn(tx_sa, &ctx_tx_sa->key, &ctx_tx_sa->next_pn_halves,
+                                 ctx_tx_sa->ssci);
 
        err = mlx5_create_encryption_key(mdev, ctx->sa.key, secy->key_len,
                                         MLX5_ACCEL_OBJ_MACSEC_KEY,
@@ -945,7 +943,8 @@ static int mlx5e_macsec_add_rxsa(struct macsec_context *ctx)
        rx_sa->fs_id = rx_sc->sc_xarray_element->fs_id;
 
        if (ctx->secy->xpn)
-               update_macsec_epn(rx_sa, &ctx_rx_sa->key, &ctx_rx_sa->next_pn_halves);
+               update_macsec_epn(rx_sa, &ctx_rx_sa->key, &ctx_rx_sa->next_pn_halves,
+                                 ctx_rx_sa->ssci);
 
        err = mlx5_create_encryption_key(mdev, ctx->sa.key, ctx->secy->key_len,
                                         MLX5_ACCEL_OBJ_MACSEC_KEY,
index 8d36e2de53a992c9542ccadda90234d8c688de06..abcc614b6191b7b09f60050d4f7ef1e295d21ea2 100644 (file)
@@ -1305,7 +1305,7 @@ static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c,
        sq->channel   = c;
        sq->uar_map   = mdev->mlx5e_res.hw_objs.bfreg.map;
        sq->min_inline_mode = params->tx_min_inline_mode;
-       sq->hw_mtu    = MLX5E_SW2HW_MTU(params, params->sw_mtu);
+       sq->hw_mtu    = MLX5E_SW2HW_MTU(params, params->sw_mtu) - ETH_FCS_LEN;
        sq->xsk_pool  = xsk_pool;
 
        sq->stats = sq->xsk_pool ?
@@ -4084,6 +4084,9 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
        struct mlx5e_vlan_table *vlan;
        struct mlx5e_params *params;
 
+       if (!netif_device_present(netdev))
+               return features;
+
        vlan = mlx5e_fs_get_vlan(priv->fs);
        mutex_lock(&priv->state_lock);
        params = &priv->channels.params;
index 75b9e1528fd29fd8fe70efde0e49229f95d69409..7d90e5b728548d947889071948be3540571f0da2 100644 (file)
@@ -191,7 +191,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vport_rep)
        if (err) {
                netdev_warn(priv->netdev, "vport %d error %d reading stats\n",
                            rep->vport, err);
-               return;
+               goto out;
        }
 
        #define MLX5_GET_CTR(p, x) \
@@ -241,6 +241,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vport_rep)
        rep_stats->tx_vport_rdma_multicast_bytes =
                MLX5_GET_CTR(out, received_ib_multicast.octets);
 
+out:
        kvfree(out);
 }
 
index c8820ab221694aae3afb8472640ccdf5fe65b5b2..3df455f6b1685cec0cd0f1c7aade984ebf155b53 100644 (file)
@@ -2419,7 +2419,7 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
 
        priv = mlx5i_epriv(netdev);
        tstamp = &priv->tstamp;
-       stats = rq->stats;
+       stats = &priv->channel_stats[rq->ix]->rq;
 
        flags_rqpn = be32_to_cpu(cqe->flags_rqpn);
        g = (flags_rqpn >> 28) & 3;
index 9af2aa2922f5da605d3001e81abb8fd04074f004..243d5d7750beb54cba6b9f562d58fa8cbdcca6c2 100644 (file)
@@ -166,6 +166,7 @@ struct mlx5_fs_chains *mlx5e_nic_chains(struct mlx5e_tc_table *tc)
  * it's different than the ht->mutex here.
  */
 static struct lock_class_key tc_ht_lock_key;
+static struct lock_class_key tc_ht_wq_key;
 
 static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow);
 static void free_flow_post_acts(struct mlx5e_tc_flow *flow);
@@ -1301,7 +1302,6 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
 
        if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
                err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
-               mlx5e_mod_hdr_dealloc(&parse_attr->mod_hdr_acts);
                if (err)
                        return err;
        }
@@ -1359,8 +1359,10 @@ static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
        }
        mutex_unlock(&tc->t_lock);
 
-       if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
+       if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
+               mlx5e_mod_hdr_dealloc(&attr->parse_attr->mod_hdr_acts);
                mlx5e_detach_mod_hdr(priv, flow);
+       }
 
        if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
                mlx5_fc_destroy(priv->mdev, attr->counter);
@@ -5181,6 +5183,7 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
                return err;
 
        lockdep_set_class(&tc->ht.mutex, &tc_ht_lock_key);
+       lockdep_init_map(&tc->ht.run_work.lockdep_map, "tc_ht_wq_key", &tc_ht_wq_key, 0);
 
        mapping_id = mlx5_query_nic_system_image_guid(dev);
 
@@ -5287,6 +5290,7 @@ int mlx5e_tc_ht_init(struct rhashtable *tc_ht)
                return err;
 
        lockdep_set_class(&tc_ht->mutex, &tc_ht_lock_key);
+       lockdep_init_map(&tc_ht->run_work.lockdep_map, "tc_ht_wq_key", &tc_ht_wq_key, 0);
 
        return 0;
 }
index 60a73990017c2a3683d112e0fc03a746276b2bbe..6b4c9ffad95b29db2e8975d9ebd0e8a526cfe2f3 100644 (file)
@@ -67,6 +67,7 @@ static void esw_acl_egress_lgcy_groups_destroy(struct mlx5_vport *vport)
 int esw_acl_egress_lgcy_setup(struct mlx5_eswitch *esw,
                              struct mlx5_vport *vport)
 {
+       bool vst_mode_steering = esw_vst_mode_is_steering(esw);
        struct mlx5_flow_destination drop_ctr_dst = {};
        struct mlx5_flow_destination *dst = NULL;
        struct mlx5_fc *drop_counter = NULL;
@@ -77,6 +78,7 @@ int esw_acl_egress_lgcy_setup(struct mlx5_eswitch *esw,
         */
        int table_size = 2;
        int dest_num = 0;
+       int actions_flag;
        int err = 0;
 
        if (vport->egress.legacy.drop_counter) {
@@ -119,8 +121,11 @@ int esw_acl_egress_lgcy_setup(struct mlx5_eswitch *esw,
                  vport->vport, vport->info.vlan, vport->info.qos);
 
        /* Allowed vlan rule */
+       actions_flag = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
+       if (vst_mode_steering)
+               actions_flag |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
        err = esw_egress_acl_vlan_create(esw, vport, NULL, vport->info.vlan,
-                                        MLX5_FLOW_CONTEXT_ACTION_ALLOW);
+                                        actions_flag);
        if (err)
                goto out;
 
index b1a5199260f69627a151785eebcbad361d8100f3..093ed86a0acd8c2da6697d704c4ec738eb039c7b 100644 (file)
@@ -139,11 +139,14 @@ static void esw_acl_ingress_lgcy_groups_destroy(struct mlx5_vport *vport)
 int esw_acl_ingress_lgcy_setup(struct mlx5_eswitch *esw,
                               struct mlx5_vport *vport)
 {
+       bool vst_mode_steering = esw_vst_mode_is_steering(esw);
        struct mlx5_flow_destination drop_ctr_dst = {};
        struct mlx5_flow_destination *dst = NULL;
        struct mlx5_flow_act flow_act = {};
        struct mlx5_flow_spec *spec = NULL;
        struct mlx5_fc *counter = NULL;
+       bool vst_check_cvlan = false;
+       bool vst_push_cvlan = false;
        /* The ingress acl table contains 4 groups
         * (2 active rules at the same time -
         *      1 allow rule from one of the first 3 groups.
@@ -203,7 +206,26 @@ int esw_acl_ingress_lgcy_setup(struct mlx5_eswitch *esw,
                goto out;
        }
 
-       if (vport->info.vlan || vport->info.qos)
+       if ((vport->info.vlan || vport->info.qos)) {
+               if (vst_mode_steering)
+                       vst_push_cvlan = true;
+               else if (!MLX5_CAP_ESW(esw->dev, vport_cvlan_insert_always))
+                       vst_check_cvlan = true;
+       }
+
+       if (vst_check_cvlan || vport->info.spoofchk)
+               spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+
+       /* Create ingress allow rule */
+       flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
+       if (vst_push_cvlan) {
+               flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
+               flow_act.vlan[0].prio = vport->info.qos;
+               flow_act.vlan[0].vid = vport->info.vlan;
+               flow_act.vlan[0].ethtype = ETH_P_8021Q;
+       }
+
+       if (vst_check_cvlan)
                MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
                                 outer_headers.cvlan_tag);
 
@@ -218,9 +240,6 @@ int esw_acl_ingress_lgcy_setup(struct mlx5_eswitch *esw,
                ether_addr_copy(smac_v, vport->info.mac);
        }
 
-       /* Create ingress allow rule */
-       spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
-       flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
        vport->ingress.allow_rule = mlx5_add_flow_rules(vport->ingress.acl, spec,
                                                        &flow_act, NULL, 0);
        if (IS_ERR(vport->ingress.allow_rule)) {
@@ -232,6 +251,9 @@ int esw_acl_ingress_lgcy_setup(struct mlx5_eswitch *esw,
                goto out;
        }
 
+       if (!vst_check_cvlan && !vport->info.spoofchk)
+               goto out;
+
        memset(&flow_act, 0, sizeof(flow_act));
        flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
        /* Attach drop flow counter */
@@ -257,7 +279,8 @@ int esw_acl_ingress_lgcy_setup(struct mlx5_eswitch *esw,
        return 0;
 
 out:
-       esw_acl_ingress_lgcy_cleanup(esw, vport);
+       if (err)
+               esw_acl_ingress_lgcy_cleanup(esw, vport);
        kvfree(spec);
        return err;
 }
index 4f8a24d84a86a2ba516fc3cc0787b20fe76df2ce..75015d370922e61002798d4d08a37427326690a9 100644 (file)
@@ -22,15 +22,13 @@ struct mlx5_esw_rate_group {
 };
 
 static int esw_qos_tsar_config(struct mlx5_core_dev *dev, u32 *sched_ctx,
-                              u32 parent_ix, u32 tsar_ix,
-                              u32 max_rate, u32 bw_share)
+                              u32 tsar_ix, u32 max_rate, u32 bw_share)
 {
        u32 bitmask = 0;
 
        if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling))
                return -EOPNOTSUPP;
 
-       MLX5_SET(scheduling_context, sched_ctx, parent_element_id, parent_ix);
        MLX5_SET(scheduling_context, sched_ctx, max_average_bw, max_rate);
        MLX5_SET(scheduling_context, sched_ctx, bw_share, bw_share);
        bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW;
@@ -51,7 +49,7 @@ static int esw_qos_group_config(struct mlx5_eswitch *esw, struct mlx5_esw_rate_g
        int err;
 
        err = esw_qos_tsar_config(dev, sched_ctx,
-                                 esw->qos.root_tsar_ix, group->tsar_ix,
+                                 group->tsar_ix,
                                  max_rate, bw_share);
        if (err)
                NL_SET_ERR_MSG_MOD(extack, "E-Switch modify group TSAR element failed");
@@ -67,23 +65,13 @@ static int esw_qos_vport_config(struct mlx5_eswitch *esw,
                                struct netlink_ext_ack *extack)
 {
        u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
-       struct mlx5_esw_rate_group *group = vport->qos.group;
        struct mlx5_core_dev *dev = esw->dev;
-       u32 parent_tsar_ix;
-       void *vport_elem;
        int err;
 
        if (!vport->qos.enabled)
                return -EIO;
 
-       parent_tsar_ix = group ? group->tsar_ix : esw->qos.root_tsar_ix;
-       MLX5_SET(scheduling_context, sched_ctx, element_type,
-                SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT);
-       vport_elem = MLX5_ADDR_OF(scheduling_context, sched_ctx,
-                                 element_attributes);
-       MLX5_SET(vport_element, vport_elem, vport_number, vport->vport);
-
-       err = esw_qos_tsar_config(dev, sched_ctx, parent_tsar_ix, vport->qos.esw_tsar_ix,
+       err = esw_qos_tsar_config(dev, sched_ctx, vport->qos.esw_tsar_ix,
                                  max_rate, bw_share);
        if (err) {
                esw_warn(esw->dev,
index 527e4bffda8d4696ad9a858bafbf0fcba6f42fb9..9daf55e90367b17c8d975e862dfc019ad4f0db49 100644 (file)
@@ -161,10 +161,17 @@ static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u16 vport,
                         esw_vport_context.vport_cvlan_strip, 1);
 
        if (set_flags & SET_VLAN_INSERT) {
-               /* insert only if no vlan in packet */
-               MLX5_SET(modify_esw_vport_context_in, in,
-                        esw_vport_context.vport_cvlan_insert, 1);
-
+               if (MLX5_CAP_ESW(dev, vport_cvlan_insert_always)) {
+                       /* insert either if vlan exist in packet or not */
+                       MLX5_SET(modify_esw_vport_context_in, in,
+                                esw_vport_context.vport_cvlan_insert,
+                                MLX5_VPORT_CVLAN_INSERT_ALWAYS);
+               } else {
+                       /* insert only if no vlan in packet */
+                       MLX5_SET(modify_esw_vport_context_in, in,
+                                esw_vport_context.vport_cvlan_insert,
+                                MLX5_VPORT_CVLAN_INSERT_WHEN_NO_CVLAN);
+               }
                MLX5_SET(modify_esw_vport_context_in, in,
                         esw_vport_context.cvlan_pcp, qos);
                MLX5_SET(modify_esw_vport_context_in, in,
@@ -809,6 +816,7 @@ out_free:
 
 static int esw_vport_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
 {
+       bool vst_mode_steering = esw_vst_mode_is_steering(esw);
        u16 vport_num = vport->vport;
        int flags;
        int err;
@@ -839,8 +847,9 @@ static int esw_vport_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
 
        flags = (vport->info.vlan || vport->info.qos) ?
                SET_VLAN_STRIP | SET_VLAN_INSERT : 0;
-       modify_esw_vport_cvlan(esw->dev, vport_num, vport->info.vlan,
-                              vport->info.qos, flags);
+       if (esw->mode == MLX5_ESWITCH_OFFLOADS || !vst_mode_steering)
+               modify_esw_vport_cvlan(esw->dev, vport_num, vport->info.vlan,
+                                      vport->info.qos, flags);
 
        return 0;
 
@@ -1455,6 +1464,7 @@ void mlx5_eswitch_disable(struct mlx5_eswitch *esw)
        mlx5_lag_disable_change(esw->dev);
        down_write(&esw->mode_lock);
        mlx5_eswitch_disable_locked(esw);
+       esw->mode = MLX5_ESWITCH_LEGACY;
        up_write(&esw->mode_lock);
        mlx5_lag_enable_change(esw->dev);
 }
@@ -1848,6 +1858,7 @@ int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
                                  u16 vport, u16 vlan, u8 qos, u8 set_flags)
 {
        struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
+       bool vst_mode_steering = esw_vst_mode_is_steering(esw);
        int err = 0;
 
        if (IS_ERR(evport))
@@ -1855,9 +1866,11 @@ int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
        if (vlan > 4095 || qos > 7)
                return -EINVAL;
 
-       err = modify_esw_vport_cvlan(esw->dev, vport, vlan, qos, set_flags);
-       if (err)
-               return err;
+       if (esw->mode == MLX5_ESWITCH_OFFLOADS || !vst_mode_steering) {
+               err = modify_esw_vport_cvlan(esw->dev, vport, vlan, qos, set_flags);
+               if (err)
+                       return err;
+       }
 
        evport->info.vlan = vlan;
        evport->info.qos = qos;
index 5a85a5d32be7e1829075e7cc72bf9c64b9f14e5c..92644fbb508164ed9e3f71d62db1a000c32bb2e8 100644 (file)
@@ -527,6 +527,12 @@ int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
 int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
                                  u16 vport, u16 vlan, u8 qos, u8 set_flags);
 
+static inline bool esw_vst_mode_is_steering(struct mlx5_eswitch *esw)
+{
+       return (MLX5_CAP_ESW_EGRESS_ACL(esw->dev, pop_vlan) &&
+               MLX5_CAP_ESW_INGRESS_ACL(esw->dev, push_vlan));
+}
+
 static inline bool mlx5_eswitch_vlan_actions_supported(struct mlx5_core_dev *dev,
                                                       u8 vlan_depth)
 {
index e455b215c70885a93b34cc68579669297c39c667..c981fa77f43985ba0efba503f6f18737b74bbf2a 100644 (file)
@@ -143,7 +143,7 @@ mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
                if (mlx5_esw_indir_table_decap_vport(attr))
                        vport = mlx5_esw_indir_table_decap_vport(attr);
 
-               if (attr && !attr->chain && esw_attr->int_port)
+               if (!attr->chain && esw_attr && esw_attr->int_port)
                        metadata =
                                mlx5e_tc_int_port_get_metadata_for_match(esw_attr->int_port);
                else
@@ -4143,8 +4143,6 @@ int mlx5_devlink_port_fn_migratable_set(struct devlink_port *port, bool enable,
        }
 
        hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
-       memcpy(hca_caps, MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability),
-              MLX5_UN_SZ_BYTES(hca_cap_union));
        MLX5_SET(cmd_hca_cap_2, hca_caps, migratable, 1);
 
        err = mlx5_vport_set_other_func_cap(esw->dev, hca_caps, vport->vport,
@@ -4236,8 +4234,6 @@ int mlx5_devlink_port_fn_roce_set(struct devlink_port *port, bool enable,
        }
 
        hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
-       memcpy(hca_caps, MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability),
-              MLX5_UN_SZ_BYTES(hca_cap_union));
        MLX5_SET(cmd_hca_cap, hca_caps, roce, enable);
 
        err = mlx5_vport_set_other_func_cap(esw->dev, hca_caps, vport_num,
index 86ed87d704f7d7a81a8cc69653d3a52e0b184d61..879555ba847ddcd6174a2e3036d38eb7a5639c88 100644 (file)
@@ -674,6 +674,13 @@ static void mlx5_fw_fatal_reporter_err_work(struct work_struct *work)
        dev = container_of(priv, struct mlx5_core_dev, priv);
        devlink = priv_to_devlink(dev);
 
+       mutex_lock(&dev->intf_state_mutex);
+       if (test_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags)) {
+               mlx5_core_err(dev, "health works are not permitted at this stage\n");
+               mutex_unlock(&dev->intf_state_mutex);
+               return;
+       }
+       mutex_unlock(&dev->intf_state_mutex);
        enter_error_state(dev, false);
        if (IS_ERR_OR_NULL(health->fw_fatal_reporter)) {
                devl_lock(devlink);
index c247cca154e9c62ea5a478390fb9a40970fe83da..eff92dc0927c171796c8b7f97f1a11c09304ccf4 100644 (file)
@@ -90,9 +90,21 @@ static void mlx5i_get_ringparam(struct net_device *dev,
 static int mlx5i_set_channels(struct net_device *dev,
                              struct ethtool_channels *ch)
 {
-       struct mlx5e_priv *priv = mlx5i_epriv(dev);
+       struct mlx5i_priv *ipriv = netdev_priv(dev);
+       struct mlx5e_priv *epriv = mlx5i_epriv(dev);
+
+       /* rtnl lock protects from race between this ethtool op and sub
+        * interface ndo_init/uninit.
+        */
+       ASSERT_RTNL();
+       if (ipriv->num_sub_interfaces > 0) {
+               mlx5_core_warn(epriv->mdev,
+                              "can't change number of channels for interfaces with sub interfaces (%u)\n",
+                              ipriv->num_sub_interfaces);
+               return -EINVAL;
+       }
 
-       return mlx5e_ethtool_set_channels(priv, ch);
+       return mlx5e_ethtool_set_channels(epriv, ch);
 }
 
 static void mlx5i_get_channels(struct net_device *dev,
index 7c5c500fd215e11b68f32e188ca990797b6f3f69..911cf4d23964548692bb3d3c6b080b3a59ac7776 100644 (file)
@@ -71,6 +71,10 @@ static void mlx5i_build_nic_params(struct mlx5_core_dev *mdev,
        params->packet_merge.type = MLX5E_PACKET_MERGE_NONE;
        params->hard_mtu = MLX5_IB_GRH_BYTES + MLX5_IPOIB_HARD_LEN;
        params->tunneled_offload_en = false;
+
+       /* CQE compression is not supported for IPoIB */
+       params->rx_cqe_compress_def = false;
+       MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS, params->rx_cqe_compress_def);
 }
 
 /* Called directly after IPoIB netdevice was created to initialize SW structs */
@@ -156,6 +160,44 @@ void mlx5i_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
        stats->tx_dropped = sstats->tx_queue_dropped;
 }
 
+struct net_device *mlx5i_parent_get(struct net_device *netdev)
+{
+       struct mlx5e_priv *priv = mlx5i_epriv(netdev);
+       struct mlx5i_priv *ipriv, *parent_ipriv;
+       struct net_device *parent_dev;
+       int parent_ifindex;
+
+       ipriv = priv->ppriv;
+
+       parent_ifindex = netdev->netdev_ops->ndo_get_iflink(netdev);
+       parent_dev = dev_get_by_index(dev_net(netdev), parent_ifindex);
+       if (!parent_dev)
+               return NULL;
+
+       parent_ipriv = netdev_priv(parent_dev);
+
+       ASSERT_RTNL();
+       parent_ipriv->num_sub_interfaces++;
+
+       ipriv->parent_dev = parent_dev;
+
+       return parent_dev;
+}
+
+void mlx5i_parent_put(struct net_device *netdev)
+{
+       struct mlx5e_priv *priv = mlx5i_epriv(netdev);
+       struct mlx5i_priv *ipriv, *parent_ipriv;
+
+       ipriv = priv->ppriv;
+       parent_ipriv = netdev_priv(ipriv->parent_dev);
+
+       ASSERT_RTNL();
+       parent_ipriv->num_sub_interfaces--;
+
+       dev_put(ipriv->parent_dev);
+}
+
 int mlx5i_init_underlay_qp(struct mlx5e_priv *priv)
 {
        struct mlx5_core_dev *mdev = priv->mdev;
index 99d46fda9f82fa9a509a7da78f91bad57a1753c9..f3f2af972020afee7438b4ac91508d5ea59ac40d 100644 (file)
@@ -54,9 +54,11 @@ struct mlx5i_priv {
        struct rdma_netdev rn; /* keep this first */
        u32 qpn;
        bool   sub_interface;
+       u32    num_sub_interfaces;
        u32    qkey;
        u16    pkey_index;
        struct mlx5i_pkey_qpn_ht *qpn_htbl;
+       struct net_device *parent_dev;
        char  *mlx5e_priv[];
 };
 
@@ -117,5 +119,9 @@ void mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
                   struct mlx5_av *av, u32 dqpn, u32 dqkey, bool xmit_more);
 void mlx5i_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats);
 
+/* Reference management for child to parent interfaces. */
+struct net_device *mlx5i_parent_get(struct net_device *netdev);
+void mlx5i_parent_put(struct net_device *netdev);
+
 #endif /* CONFIG_MLX5_CORE_IPOIB */
 #endif /* __MLX5E_IPOB_H__ */
index 4d9c9e49645c9e1b08daa4813de048b74233c146..03e681297937f8687d4f5373399898b91d541136 100644 (file)
@@ -158,21 +158,28 @@ static int mlx5i_pkey_dev_init(struct net_device *dev)
        struct mlx5e_priv *priv = mlx5i_epriv(dev);
        struct mlx5i_priv *ipriv, *parent_ipriv;
        struct net_device *parent_dev;
-       int parent_ifindex;
 
        ipriv = priv->ppriv;
 
-       /* Get QPN to netdevice hash table from parent */
-       parent_ifindex = dev->netdev_ops->ndo_get_iflink(dev);
-       parent_dev = dev_get_by_index(dev_net(dev), parent_ifindex);
+       /* Link to parent */
+       parent_dev = mlx5i_parent_get(dev);
        if (!parent_dev) {
                mlx5_core_warn(priv->mdev, "failed to get parent device\n");
                return -EINVAL;
        }
 
+       if (dev->num_rx_queues < parent_dev->real_num_rx_queues) {
+               mlx5_core_warn(priv->mdev,
+                              "failed to create child device with rx queues [%d] less than parent's [%d]\n",
+                              dev->num_rx_queues,
+                              parent_dev->real_num_rx_queues);
+               mlx5i_parent_put(dev);
+               return -EINVAL;
+       }
+
+       /* Get QPN to netdevice hash table from parent */
        parent_ipriv = netdev_priv(parent_dev);
        ipriv->qpn_htbl = parent_ipriv->qpn_htbl;
-       dev_put(parent_dev);
 
        return mlx5i_dev_init(dev);
 }
@@ -184,6 +191,7 @@ static int mlx5i_pkey_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 
 static void mlx5i_pkey_dev_cleanup(struct net_device *netdev)
 {
+       mlx5i_parent_put(netdev);
        return mlx5i_dev_cleanup(netdev);
 }
 
index 32c3e0a649a753fabc8508634a52323c158bfedb..ad32b80e8501855a19bcb21d5f10687d14b9ba21 100644 (file)
@@ -228,6 +228,7 @@ static void mlx5_ldev_free(struct kref *ref)
        if (ldev->nb.notifier_call)
                unregister_netdevice_notifier_net(&init_net, &ldev->nb);
        mlx5_lag_mp_cleanup(ldev);
+       cancel_delayed_work_sync(&ldev->bond_work);
        destroy_workqueue(ldev->wq);
        mlx5_lag_mpesw_cleanup(ldev);
        mutex_destroy(&ldev->lock);
index 69cfe60c558a7400299901f9d73edea3d63b566b..69318b1432688183477d17615276d52e2d6b5815 100644 (file)
@@ -681,7 +681,7 @@ static int mlx5_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
 static const struct ptp_clock_info mlx5_ptp_clock_info = {
        .owner          = THIS_MODULE,
        .name           = "mlx5_ptp",
-       .max_adj        = 100000000,
+       .max_adj        = 50000000,
        .n_alarm        = 0,
        .n_ext_ts       = 0,
        .n_per_out      = 0,
index 7f5db13e3550c0cdca124a6e714410d7b12cb505..3d5f2a4b1fed4f8d5d6bf235783e4df06c0eb757 100644 (file)
@@ -613,7 +613,7 @@ static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
                MLX5_SET(cmd_hca_cap, set_hca_cap, num_total_dynamic_vf_msix,
                         MLX5_CAP_GEN_MAX(dev, num_total_dynamic_vf_msix));
 
-       if (MLX5_CAP_GEN(dev, roce_rw_supported))
+       if (MLX5_CAP_GEN(dev, roce_rw_supported) && MLX5_CAP_GEN_MAX(dev, roce))
                MLX5_SET(cmd_hca_cap, set_hca_cap, roce,
                         mlx5_is_roce_on(dev));
 
@@ -1050,6 +1050,8 @@ err_rl_cleanup:
 err_tables_cleanup:
        mlx5_geneve_destroy(dev->geneve);
        mlx5_vxlan_destroy(dev->vxlan);
+       mlx5_cleanup_clock(dev);
+       mlx5_cleanup_reserved_gids(dev);
        mlx5_cq_debugfs_cleanup(dev);
        mlx5_fw_reset_cleanup(dev);
 err_events_cleanup:
@@ -2096,7 +2098,7 @@ static void mlx5_core_verify_params(void)
        }
 }
 
-static int __init init(void)
+static int __init mlx5_init(void)
 {
        int err;
 
@@ -2131,7 +2133,7 @@ err_debug:
        return err;
 }
 
-static void __exit cleanup(void)
+static void __exit mlx5_cleanup(void)
 {
        mlx5e_cleanup();
        mlx5_sf_driver_unregister();
@@ -2139,5 +2141,5 @@ static void __exit cleanup(void)
        mlx5_unregister_debugfs();
 }
 
-module_init(init);
-module_exit(cleanup);
+module_init(mlx5_init);
+module_exit(mlx5_cleanup);
index 0777be24a3074a7a078b091cb30b93d4d33df9e1..8bce730b5c5befdc4e553810eae0ab1400fe836f 100644 (file)
@@ -62,13 +62,12 @@ int mlx5_qos_create_root_node(struct mlx5_core_dev *mdev, u32 *id)
        return mlx5_qos_create_inner_node(mdev, MLX5_QOS_DEFAULT_DWRR_UID, 0, 0, id);
 }
 
-int mlx5_qos_update_node(struct mlx5_core_dev *mdev, u32 parent_id,
+int mlx5_qos_update_node(struct mlx5_core_dev *mdev,
                         u32 bw_share, u32 max_avg_bw, u32 id)
 {
        u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
        u32 bitmask = 0;
 
-       MLX5_SET(scheduling_context, sched_ctx, parent_element_id, parent_id);
        MLX5_SET(scheduling_context, sched_ctx, bw_share, bw_share);
        MLX5_SET(scheduling_context, sched_ctx, max_average_bw, max_avg_bw);
 
index 125e4e47e6f71f2ecf31a960d5eca467ab8a5b92..624ce822b7f596e1c6ca1ce04b0bf0606e894abc 100644 (file)
@@ -23,7 +23,7 @@ int mlx5_qos_create_leaf_node(struct mlx5_core_dev *mdev, u32 parent_id,
 int mlx5_qos_create_inner_node(struct mlx5_core_dev *mdev, u32 parent_id,
                               u32 bw_share, u32 max_avg_bw, u32 *id);
 int mlx5_qos_create_root_node(struct mlx5_core_dev *mdev, u32 *id);
-int mlx5_qos_update_node(struct mlx5_core_dev *mdev, u32 parent_id, u32 bw_share,
+int mlx5_qos_update_node(struct mlx5_core_dev *mdev, u32 bw_share,
                         u32 max_avg_bw, u32 id);
 int mlx5_qos_destroy_node(struct mlx5_core_dev *mdev, u32 id);
 
index 74cbe53ee9dbb1e042b1c5bb30f3acfd884f802c..b851141e03de3c67e1f422460d1d5cc571356066 100644 (file)
@@ -3,7 +3,12 @@
 
 #include "dr_types.h"
 
+#if defined(CONFIG_FRAME_WARN) && (CONFIG_FRAME_WARN < 2048)
+/* don't try to optimize STE allocation if the stack is too constaraining */
+#define DR_RULE_MAX_STES_OPTIMIZED 0
+#else
 #define DR_RULE_MAX_STES_OPTIMIZED 5
+#endif
 #define DR_RULE_MAX_STE_CHAIN_OPTIMIZED (DR_RULE_MAX_STES_OPTIMIZED + DR_ACTION_MAX_STES)
 
 static int dr_rule_append_to_miss_list(struct mlx5dr_domain *dmn,
@@ -1218,10 +1223,7 @@ dr_rule_create_rule_nic(struct mlx5dr_rule *rule,
 
        mlx5dr_domain_nic_unlock(nic_dmn);
 
-       if (unlikely(!hw_ste_arr_is_opt))
-               kfree(hw_ste_arr);
-
-       return 0;
+       goto out;
 
 free_rule:
        dr_rule_clean_rule_members(rule, nic_rule);
@@ -1238,6 +1240,7 @@ remove_from_nic_tbl:
 free_hw_ste:
        mlx5dr_domain_nic_unlock(nic_dmn);
 
+out:
        if (unlikely(!hw_ste_arr_is_opt))
                kfree(hw_ste_arr);
 
index c22c3ac4e2a18d45babeb8bd811e50991b824cee..09e32778b012d18c4a15de40b025dbbd901abf8c 100644 (file)
@@ -2951,7 +2951,7 @@ struct mlxsw_sp_nexthop_group_info {
           gateway:1, /* routes using the group use a gateway */
           is_resilient:1;
        struct list_head list; /* member in nh_res_grp_list */
-       struct mlxsw_sp_nexthop nexthops[0];
+       struct mlxsw_sp_nexthop nexthops[];
 #define nh_rif nexthops[0].rif
 };
 
index cadde20505ba068953112950532b42d3896009e9..580c91d24a5284e72066c9a3e32092454803c56d 100644 (file)
@@ -1043,11 +1043,6 @@ static int lan966x_probe(struct platform_device *pdev)
                lan966x->base_mac[5] &= 0xf0;
        }
 
-       ports = device_get_named_child_node(&pdev->dev, "ethernet-ports");
-       if (!ports)
-               return dev_err_probe(&pdev->dev, -ENODEV,
-                                    "no ethernet-ports child found\n");
-
        err = lan966x_create_targets(pdev, lan966x);
        if (err)
                return dev_err_probe(&pdev->dev, err,
@@ -1125,6 +1120,11 @@ static int lan966x_probe(struct platform_device *pdev)
                }
        }
 
+       ports = device_get_named_child_node(&pdev->dev, "ethernet-ports");
+       if (!ports)
+               return dev_err_probe(&pdev->dev, -ENODEV,
+                                    "no ethernet-ports child found\n");
+
        /* init switch */
        lan966x_init(lan966x);
        lan966x_stats_init(lan966x);
@@ -1162,6 +1162,8 @@ static int lan966x_probe(struct platform_device *pdev)
                        goto cleanup_ports;
        }
 
+       fwnode_handle_put(ports);
+
        lan966x_mdb_init(lan966x);
        err = lan966x_fdb_init(lan966x);
        if (err)
@@ -1191,6 +1193,7 @@ cleanup_fdb:
        lan966x_fdb_deinit(lan966x);
 
 cleanup_ports:
+       fwnode_handle_put(ports);
        fwnode_handle_put(portnp);
 
        lan966x_cleanup_ports(lan966x);
index 1a61c6cdb0779f84e90b765050a83ab4432ac6af..0050fcb988b75e01e56abb3272caf194d01870dd 100644 (file)
@@ -381,7 +381,7 @@ int lan966x_port_pcs_set(struct lan966x_port *port,
        }
 
        /* Take PCS out of reset */
-       lan_rmw(DEV_CLOCK_CFG_LINK_SPEED_SET(2) |
+       lan_rmw(DEV_CLOCK_CFG_LINK_SPEED_SET(LAN966X_SPEED_1000) |
                DEV_CLOCK_CFG_PCS_RX_RST_SET(0) |
                DEV_CLOCK_CFG_PCS_TX_RST_SET(0),
                DEV_CLOCK_CFG_LINK_SPEED |
index f9ebfaafbebc2678383ee9779c9c1eccfef636b8..a8348437dd87f6304ef0a9004e5f35964a88f92a 100644 (file)
@@ -1073,6 +1073,9 @@ void lan966x_ptp_deinit(struct lan966x *lan966x)
        struct lan966x_port *port;
        int i;
 
+       if (!lan966x->ptp)
+               return;
+
        for (i = 0; i < lan966x->num_phys_ports; i++) {
                port = lan966x->ports[i];
                if (!port)
index d8dc9fbb81e1a789fefbee80d22561364e3ae891..a54c0426a35f3935edbf35c793f0daa5a9237cd9 100644 (file)
@@ -95,10 +95,7 @@ lan966x_vcap_is2_get_port_keysets(struct net_device *dev, int lookup,
        bool found = false;
        u32 val;
 
-       /* Check if the port keyset selection is enabled */
        val = lan_rd(lan966x, ANA_VCAP_S2_CFG(port->chip_port));
-       if (!ANA_VCAP_S2_CFG_ENA_GET(val))
-               return -ENOENT;
 
        /* Collect all keysets for the port in a list */
        if (l3_proto == ETH_P_ALL)
index d25f4f09faa06a03387288703a7ad82d0784fbcc..3c5d4fe9937375eecbe9d286e3ecf0b95b2f75e6 100644 (file)
@@ -834,7 +834,7 @@ static int mchp_sparx5_probe(struct platform_device *pdev)
        if (err)
                goto cleanup_config;
 
-       if (!of_get_mac_address(np, sparx5->base_mac)) {
+       if (of_get_mac_address(np, sparx5->base_mac)) {
                dev_info(sparx5->dev, "MAC addr was not set, use random MAC\n");
                eth_random_addr(sparx5->base_mac);
                sparx5->base_mac[5] = 0;
index da33f09facb916db18966cf457fabf202135979e..432d79d691c2952d09432239b0e60ede8fe938f7 100644 (file)
@@ -617,6 +617,9 @@ struct nfp_net_dp {
  * @vnic_no_name:      For non-port PF vNIC make ndo_get_phys_port_name return
  *                     -EOPNOTSUPP to keep backwards compatibility (set by app)
  * @port:              Pointer to nfp_port structure if vNIC is a port
+ * @mc_lock:           Protect mc_addrs list
+ * @mc_addrs:          List of mc addrs to add/del to HW
+ * @mc_work:           Work to update mc addrs
  * @app_priv:          APP private data for this vNIC
  */
 struct nfp_net {
@@ -718,6 +721,10 @@ struct nfp_net {
 
        struct nfp_port *port;
 
+       spinlock_t mc_lock;
+       struct list_head mc_addrs;
+       struct work_struct mc_work;
+
        void *app_priv;
 };
 
index 09053373288fe7899fa4213fae490f185c250bbd..18fc9971f1c8f268c9ec754fe530b697a39ffb0b 100644 (file)
@@ -1334,9 +1334,14 @@ err_unlock:
        return err;
 }
 
-static int nfp_net_mc_cfg(struct net_device *netdev, const unsigned char *addr, const u32 cmd)
+struct nfp_mc_addr_entry {
+       u8 addr[ETH_ALEN];
+       u32 cmd;
+       struct list_head list;
+};
+
+static int nfp_net_mc_cfg(struct nfp_net *nn, const unsigned char *addr, const u32 cmd)
 {
-       struct nfp_net *nn = netdev_priv(netdev);
        int ret;
 
        ret = nfp_net_mbox_lock(nn, NFP_NET_CFG_MULTICAST_SZ);
@@ -1351,6 +1356,25 @@ static int nfp_net_mc_cfg(struct net_device *netdev, const unsigned char *addr,
        return nfp_net_mbox_reconfig_and_unlock(nn, cmd);
 }
 
+static int nfp_net_mc_prep(struct nfp_net *nn, const unsigned char *addr, const u32 cmd)
+{
+       struct nfp_mc_addr_entry *entry;
+
+       entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
+       if (!entry)
+               return -ENOMEM;
+
+       ether_addr_copy(entry->addr, addr);
+       entry->cmd = cmd;
+       spin_lock_bh(&nn->mc_lock);
+       list_add_tail(&entry->list, &nn->mc_addrs);
+       spin_unlock_bh(&nn->mc_lock);
+
+       schedule_work(&nn->mc_work);
+
+       return 0;
+}
+
 static int nfp_net_mc_sync(struct net_device *netdev, const unsigned char *addr)
 {
        struct nfp_net *nn = netdev_priv(netdev);
@@ -1361,12 +1385,35 @@ static int nfp_net_mc_sync(struct net_device *netdev, const unsigned char *addr)
                return -EINVAL;
        }
 
-       return nfp_net_mc_cfg(netdev, addr, NFP_NET_CFG_MBOX_CMD_MULTICAST_ADD);
+       return nfp_net_mc_prep(nn, addr, NFP_NET_CFG_MBOX_CMD_MULTICAST_ADD);
 }
 
 static int nfp_net_mc_unsync(struct net_device *netdev, const unsigned char *addr)
 {
-       return nfp_net_mc_cfg(netdev, addr, NFP_NET_CFG_MBOX_CMD_MULTICAST_DEL);
+       struct nfp_net *nn = netdev_priv(netdev);
+
+       return nfp_net_mc_prep(nn, addr, NFP_NET_CFG_MBOX_CMD_MULTICAST_DEL);
+}
+
+static void nfp_net_mc_addr_config(struct work_struct *work)
+{
+       struct nfp_net *nn = container_of(work, struct nfp_net, mc_work);
+       struct nfp_mc_addr_entry *entry, *tmp;
+       struct list_head tmp_list;
+
+       INIT_LIST_HEAD(&tmp_list);
+
+       spin_lock_bh(&nn->mc_lock);
+       list_splice_init(&nn->mc_addrs, &tmp_list);
+       spin_unlock_bh(&nn->mc_lock);
+
+       list_for_each_entry_safe(entry, tmp, &tmp_list, list) {
+               if (nfp_net_mc_cfg(nn, entry->addr, entry->cmd))
+                       nn_err(nn, "Config mc address to HW failed.\n");
+
+               list_del(&entry->list);
+               kfree(entry);
+       }
 }
 
 static void nfp_net_set_rx_mode(struct net_device *netdev)
@@ -2633,6 +2680,11 @@ int nfp_net_init(struct nfp_net *nn)
 
        if (!nn->dp.netdev)
                return 0;
+
+       spin_lock_init(&nn->mc_lock);
+       INIT_LIST_HEAD(&nn->mc_addrs);
+       INIT_WORK(&nn->mc_work, nfp_net_mc_addr_config);
+
        return register_netdev(nn->dp.netdev);
 
 err_clean_mbox:
@@ -2652,5 +2704,6 @@ void nfp_net_clean(struct nfp_net *nn)
        unregister_netdev(nn->dp.netdev);
        nfp_net_ipsec_clean(nn);
        nfp_ccm_mbox_clean(nn);
+       flush_work(&nn->mc_work);
        nfp_net_reconfig_wait_posted(nn);
 }
index 86ecb080b153686db0c5275e2ac6832b48264505..cdcead614e9fa83b0c27019d3cb67d2485fdc890 100644 (file)
@@ -1832,7 +1832,8 @@ static enum dbg_status qed_find_nvram_image(struct qed_hwfn *p_hwfn,
                                            struct qed_ptt *p_ptt,
                                            u32 image_type,
                                            u32 *nvram_offset_bytes,
-                                           u32 *nvram_size_bytes)
+                                           u32 *nvram_size_bytes,
+                                           bool b_can_sleep)
 {
        u32 ret_mcp_resp, ret_mcp_param, ret_txn_size;
        struct mcp_file_att file_att;
@@ -1846,7 +1847,8 @@ static enum dbg_status qed_find_nvram_image(struct qed_hwfn *p_hwfn,
                                        &ret_mcp_resp,
                                        &ret_mcp_param,
                                        &ret_txn_size,
-                                       (u32 *)&file_att, false);
+                                       (u32 *)&file_att,
+                                       b_can_sleep);
 
        /* Check response */
        if (nvm_result || (ret_mcp_resp & FW_MSG_CODE_MASK) !=
@@ -1873,7 +1875,9 @@ static enum dbg_status qed_find_nvram_image(struct qed_hwfn *p_hwfn,
 static enum dbg_status qed_nvram_read(struct qed_hwfn *p_hwfn,
                                      struct qed_ptt *p_ptt,
                                      u32 nvram_offset_bytes,
-                                     u32 nvram_size_bytes, u32 *ret_buf)
+                                     u32 nvram_size_bytes,
+                                     u32 *ret_buf,
+                                     bool b_can_sleep)
 {
        u32 ret_mcp_resp, ret_mcp_param, ret_read_size, bytes_to_copy;
        s32 bytes_left = nvram_size_bytes;
@@ -1899,7 +1903,7 @@ static enum dbg_status qed_nvram_read(struct qed_hwfn *p_hwfn,
                                       &ret_mcp_resp,
                                       &ret_mcp_param, &ret_read_size,
                                       (u32 *)((u8 *)ret_buf + read_offset),
-                                      false))
+                                      b_can_sleep))
                        return DBG_STATUS_NVRAM_READ_FAILED;
 
                /* Check response */
@@ -3380,7 +3384,8 @@ static u32 qed_grc_dump_mcp_hw_dump(struct qed_hwfn *p_hwfn,
                                      p_ptt,
                                      NVM_TYPE_HW_DUMP_OUT,
                                      &hw_dump_offset_bytes,
-                                     &hw_dump_size_bytes);
+                                     &hw_dump_size_bytes,
+                                     false);
        if (status != DBG_STATUS_OK)
                return 0;
 
@@ -3397,7 +3402,9 @@ static u32 qed_grc_dump_mcp_hw_dump(struct qed_hwfn *p_hwfn,
                status = qed_nvram_read(p_hwfn,
                                        p_ptt,
                                        hw_dump_offset_bytes,
-                                       hw_dump_size_bytes, dump_buf + offset);
+                                       hw_dump_size_bytes,
+                                       dump_buf + offset,
+                                       false);
                if (status != DBG_STATUS_OK) {
                        DP_NOTICE(p_hwfn,
                                  "Failed to read MCP HW Dump image from NVRAM\n");
@@ -4123,7 +4130,9 @@ static enum dbg_status qed_mcp_trace_get_meta_info(struct qed_hwfn *p_hwfn,
        return qed_find_nvram_image(p_hwfn,
                                    p_ptt,
                                    nvram_image_type,
-                                   trace_meta_offset, trace_meta_size);
+                                   trace_meta_offset,
+                                   trace_meta_size,
+                                   true);
 }
 
 /* Reads the MCP Trace meta data from NVRAM into the specified buffer */
@@ -4139,7 +4148,10 @@ static enum dbg_status qed_mcp_trace_read_meta(struct qed_hwfn *p_hwfn,
        /* Read meta data from NVRAM */
        status = qed_nvram_read(p_hwfn,
                                p_ptt,
-                               nvram_offset_in_bytes, size_in_bytes, buf);
+                               nvram_offset_in_bytes,
+                               size_in_bytes,
+                               buf,
+                               true);
        if (status != DBG_STATUS_OK)
                return status;
 
index dbb800769cb63e1794293950b8671eaf7cda4d63..c95d56e56c59a18e70c0b994a37013eb81108b5c 100644 (file)
@@ -2505,7 +2505,13 @@ int qlcnic_83xx_init(struct qlcnic_adapter *adapter)
                goto disable_mbx_intr;
 
        qlcnic_83xx_clear_function_resources(adapter);
-       qlcnic_dcb_enable(adapter->dcb);
+
+       err = qlcnic_dcb_enable(adapter->dcb);
+       if (err) {
+               qlcnic_dcb_free(adapter->dcb);
+               goto disable_mbx_intr;
+       }
+
        qlcnic_83xx_initialize_nic(adapter, 1);
        qlcnic_dcb_get_info(adapter->dcb);
 
index 7519773eaca6ee5caa29f40c6f8aa891464760bd..22afa2be85fdba9a5e68c3a1179467bd8102b8cb 100644 (file)
@@ -41,11 +41,6 @@ struct qlcnic_dcb {
        unsigned long                   state;
 };
 
-static inline void qlcnic_clear_dcb_ops(struct qlcnic_dcb *dcb)
-{
-       kfree(dcb);
-}
-
 static inline int qlcnic_dcb_get_hw_capability(struct qlcnic_dcb *dcb)
 {
        if (dcb && dcb->ops->get_hw_capability)
@@ -112,9 +107,8 @@ static inline void qlcnic_dcb_init_dcbnl_ops(struct qlcnic_dcb *dcb)
                dcb->ops->init_dcbnl_ops(dcb);
 }
 
-static inline void qlcnic_dcb_enable(struct qlcnic_dcb *dcb)
+static inline int qlcnic_dcb_enable(struct qlcnic_dcb *dcb)
 {
-       if (dcb && qlcnic_dcb_attach(dcb))
-               qlcnic_clear_dcb_ops(dcb);
+       return dcb ? qlcnic_dcb_attach(dcb) : 0;
 }
 #endif
index 28476b982bab6c0a3778947b294a1ae484087ce0..44dac3c0908eb8a0bc534f1e01850924b83ab39a 100644 (file)
@@ -2599,7 +2599,13 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                         "Device does not support MSI interrupts\n");
 
        if (qlcnic_82xx_check(adapter)) {
-               qlcnic_dcb_enable(adapter->dcb);
+               err = qlcnic_dcb_enable(adapter->dcb);
+               if (err) {
+                       qlcnic_dcb_free(adapter->dcb);
+                       dev_err(&pdev->dev, "Failed to enable DCB\n");
+                       goto err_out_free_hw;
+               }
+
                qlcnic_dcb_get_info(adapter->dcb);
                err = qlcnic_setup_intr(adapter);
 
index a9dcc98b6af1891b31375304d8d25f44d3d8b2fe..dadd61bccfe72cc323d2aa6ebb6420c04c7fc021 100644 (file)
@@ -1996,10 +1996,7 @@ static enum mac_version rtl8169_get_mac_version(u16 xid, bool gmii)
 
                /* 8168F family. */
                { 0x7c8, 0x488, RTL_GIGA_MAC_VER_38 },
-               /* It seems this chip version never made it to
-                * the wild. Let's disable detection.
-                * { 0x7cf, 0x481,      RTL_GIGA_MAC_VER_36 },
-                */
+               { 0x7cf, 0x481, RTL_GIGA_MAC_VER_36 },
                { 0x7cf, 0x480, RTL_GIGA_MAC_VER_35 },
 
                /* 8168E family. */
@@ -2210,28 +2207,6 @@ static int rtl_set_mac_address(struct net_device *dev, void *p)
        return 0;
 }
 
-static void rtl_wol_enable_rx(struct rtl8169_private *tp)
-{
-       if (tp->mac_version >= RTL_GIGA_MAC_VER_25)
-               RTL_W32(tp, RxConfig, RTL_R32(tp, RxConfig) |
-                       AcceptBroadcast | AcceptMulticast | AcceptMyPhys);
-}
-
-static void rtl_prepare_power_down(struct rtl8169_private *tp)
-{
-       if (tp->dash_type != RTL_DASH_NONE)
-               return;
-
-       if (tp->mac_version == RTL_GIGA_MAC_VER_32 ||
-           tp->mac_version == RTL_GIGA_MAC_VER_33)
-               rtl_ephy_write(tp, 0x19, 0xff64);
-
-       if (device_may_wakeup(tp_to_dev(tp))) {
-               phy_speed_down(tp->phydev, false);
-               rtl_wol_enable_rx(tp);
-       }
-}
-
 static void rtl_init_rxcfg(struct rtl8169_private *tp)
 {
        switch (tp->mac_version) {
@@ -2455,6 +2430,31 @@ static void rtl_enable_rxdvgate(struct rtl8169_private *tp)
        rtl_wait_txrx_fifo_empty(tp);
 }
 
+static void rtl_wol_enable_rx(struct rtl8169_private *tp)
+{
+       if (tp->mac_version >= RTL_GIGA_MAC_VER_25)
+               RTL_W32(tp, RxConfig, RTL_R32(tp, RxConfig) |
+                       AcceptBroadcast | AcceptMulticast | AcceptMyPhys);
+
+       if (tp->mac_version >= RTL_GIGA_MAC_VER_40)
+               rtl_disable_rxdvgate(tp);
+}
+
+static void rtl_prepare_power_down(struct rtl8169_private *tp)
+{
+       if (tp->dash_type != RTL_DASH_NONE)
+               return;
+
+       if (tp->mac_version == RTL_GIGA_MAC_VER_32 ||
+           tp->mac_version == RTL_GIGA_MAC_VER_33)
+               rtl_ephy_write(tp, 0x19, 0xff64);
+
+       if (device_may_wakeup(tp_to_dev(tp))) {
+               phy_speed_down(tp->phydev, false);
+               rtl_wol_enable_rx(tp);
+       }
+}
+
 static void rtl_set_tx_config_registers(struct rtl8169_private *tp)
 {
        u32 val = TX_DMA_BURST << TxDMAShift |
@@ -3872,7 +3872,7 @@ static void rtl8169_tx_clear(struct rtl8169_private *tp)
        netdev_reset_queue(tp->dev);
 }
 
-static void rtl8169_cleanup(struct rtl8169_private *tp, bool going_down)
+static void rtl8169_cleanup(struct rtl8169_private *tp)
 {
        napi_disable(&tp->napi);
 
@@ -3884,9 +3884,6 @@ static void rtl8169_cleanup(struct rtl8169_private *tp, bool going_down)
 
        rtl_rx_close(tp);
 
-       if (going_down && tp->dev->wol_enabled)
-               goto no_reset;
-
        switch (tp->mac_version) {
        case RTL_GIGA_MAC_VER_28:
        case RTL_GIGA_MAC_VER_31:
@@ -3907,7 +3904,7 @@ static void rtl8169_cleanup(struct rtl8169_private *tp, bool going_down)
        }
 
        rtl_hw_reset(tp);
-no_reset:
+
        rtl8169_tx_clear(tp);
        rtl8169_init_ring_indexes(tp);
 }
@@ -3918,7 +3915,7 @@ static void rtl_reset_work(struct rtl8169_private *tp)
 
        netif_stop_queue(tp->dev);
 
-       rtl8169_cleanup(tp, false);
+       rtl8169_cleanup(tp);
 
        for (i = 0; i < NUM_RX_DESC; i++)
                rtl8169_mark_to_asic(tp->RxDescArray + i);
@@ -4605,7 +4602,7 @@ static void rtl8169_down(struct rtl8169_private *tp)
        pci_clear_master(tp->pci_dev);
        rtl_pci_commit(tp);
 
-       rtl8169_cleanup(tp, true);
+       rtl8169_cleanup(tp);
        rtl_disable_exit_l1(tp);
        rtl_prepare_power_down(tp);
 }
index e42ceaa0099fdb5b4add615add4dcc592a31bc14..6441892636dba2fd567c4f58225816f797b93acb 100644 (file)
@@ -1578,6 +1578,7 @@ static int rswitch_device_alloc(struct rswitch_private *priv, int index)
 {
        struct platform_device *pdev = priv->pdev;
        struct rswitch_device *rdev;
+       struct device_node *port;
        struct net_device *ndev;
        int err;
 
@@ -1606,7 +1607,9 @@ static int rswitch_device_alloc(struct rswitch_private *priv, int index)
 
        netif_napi_add(ndev, &rdev->napi, rswitch_poll);
 
-       err = of_get_ethdev_address(pdev->dev.of_node, ndev);
+       port = rswitch_get_port_node(rdev);
+       err = of_get_ethdev_address(port, ndev);
+       of_node_put(port);
        if (err) {
                if (is_valid_ether_addr(rdev->etha->mac_addr))
                        eth_hw_addr_set(ndev, rdev->etha->mac_addr);
@@ -1786,6 +1789,11 @@ static int renesas_eth_sw_probe(struct platform_device *pdev)
        pm_runtime_get_sync(&pdev->dev);
 
        ret = rswitch_init(priv);
+       if (ret < 0) {
+               pm_runtime_put(&pdev->dev);
+               pm_runtime_disable(&pdev->dev);
+               return ret;
+       }
 
        device_set_wakeup_capable(&pdev->dev, 1);
 
index d42e1afb65213419ada4168f1c227a1d7c815c23..2f7d8e4561d920cf8e2dc7524c6eed9b9dbcbb00 100644 (file)
@@ -90,7 +90,6 @@ struct mediatek_dwmac_plat_data {
 struct mediatek_dwmac_variant {
        int (*dwmac_set_phy_interface)(struct mediatek_dwmac_plat_data *plat);
        int (*dwmac_set_delay)(struct mediatek_dwmac_plat_data *plat);
-       void (*dwmac_fix_mac_speed)(void *priv, unsigned int speed);
 
        /* clock ids to be requested */
        const char * const *clk_list;
@@ -443,32 +442,9 @@ static int mt8195_set_delay(struct mediatek_dwmac_plat_data *plat)
        return 0;
 }
 
-static void mt8195_fix_mac_speed(void *priv, unsigned int speed)
-{
-       struct mediatek_dwmac_plat_data *priv_plat = priv;
-
-       if ((phy_interface_mode_is_rgmii(priv_plat->phy_mode))) {
-               /* prefer 2ns fixed delay which is controlled by TXC_PHASE_CTRL,
-                * when link speed is 1Gbps with RGMII interface,
-                * Fall back to delay macro circuit for 10/100Mbps link speed.
-                */
-               if (speed == SPEED_1000)
-                       regmap_update_bits(priv_plat->peri_regmap,
-                                          MT8195_PERI_ETH_CTRL0,
-                                          MT8195_RGMII_TXC_PHASE_CTRL |
-                                          MT8195_DLY_GTXC_ENABLE |
-                                          MT8195_DLY_GTXC_INV |
-                                          MT8195_DLY_GTXC_STAGES,
-                                          MT8195_RGMII_TXC_PHASE_CTRL);
-               else
-                       mt8195_set_delay(priv_plat);
-       }
-}
-
 static const struct mediatek_dwmac_variant mt8195_gmac_variant = {
        .dwmac_set_phy_interface = mt8195_set_interface,
        .dwmac_set_delay = mt8195_set_delay,
-       .dwmac_fix_mac_speed = mt8195_fix_mac_speed,
        .clk_list = mt8195_dwmac_clk_l,
        .num_clks = ARRAY_SIZE(mt8195_dwmac_clk_l),
        .dma_bit_mask = 35,
@@ -619,8 +595,6 @@ static int mediatek_dwmac_common_data(struct platform_device *pdev,
        plat->bsp_priv = priv_plat;
        plat->init = mediatek_dwmac_init;
        plat->clks_config = mediatek_dwmac_clks_config;
-       if (priv_plat->variant->dwmac_fix_mac_speed)
-               plat->fix_mac_speed = priv_plat->variant->dwmac_fix_mac_speed;
 
        plat->safety_feat_cfg = devm_kzalloc(&pdev->dev,
                                             sizeof(*plat->safety_feat_cfg),
index 9c2d40f853ed0429d19d2ac0d647640f29dda878..413f660172199c7b893072befd3dce0170a43cc2 100644 (file)
@@ -186,11 +186,25 @@ static void dwmac5_handle_dma_err(struct net_device *ndev,
 int dwmac5_safety_feat_config(void __iomem *ioaddr, unsigned int asp,
                              struct stmmac_safety_feature_cfg *safety_feat_cfg)
 {
+       struct stmmac_safety_feature_cfg all_safety_feats = {
+               .tsoee = 1,
+               .mrxpee = 1,
+               .mestee = 1,
+               .mrxee = 1,
+               .mtxee = 1,
+               .epsi = 1,
+               .edpp = 1,
+               .prtyen = 1,
+               .tmouten = 1,
+       };
        u32 value;
 
        if (!asp)
                return -EINVAL;
 
+       if (!safety_feat_cfg)
+               safety_feat_cfg = &all_safety_feats;
+
        /* 1. Enable Safety Features */
        value = readl(ioaddr + MTL_ECC_CONTROL);
        value |= MEEAO; /* MTL ECC Error Addr Status Override */
index f453b0d093663d5db152d217f8376943e3ce5cc1..35c8dd92d3692fb7f19645b2d58b4a4b639cd3e9 100644 (file)
@@ -551,16 +551,16 @@ static void stmmac_get_per_qstats(struct stmmac_priv *priv, u64 *data)
                p = (char *)priv + offsetof(struct stmmac_priv,
                                            xstats.txq_stats[q].tx_pkt_n);
                for (stat = 0; stat < STMMAC_TXQ_STATS; stat++) {
-                       *data++ = (*(u64 *)p);
-                       p += sizeof(u64 *);
+                       *data++ = (*(unsigned long *)p);
+                       p += sizeof(unsigned long);
                }
        }
        for (q = 0; q < rx_cnt; q++) {
                p = (char *)priv + offsetof(struct stmmac_priv,
                                            xstats.rxq_stats[q].rx_pkt_n);
                for (stat = 0; stat < STMMAC_RXQ_STATS; stat++) {
-                       *data++ = (*(u64 *)p);
-                       p += sizeof(u64 *);
+                       *data++ = (*(unsigned long *)p);
+                       p += sizeof(unsigned long);
                }
        }
 }
index c6951c976f5ddb2b67dc760bbb433c871c3efab4..b7e5af58ab750e67ebd51ca15a8dff0f20ee6efa 100644 (file)
@@ -1150,6 +1150,11 @@ static int stmmac_init_phy(struct net_device *dev)
                int addr = priv->plat->phy_addr;
                struct phy_device *phydev;
 
+               if (addr < 0) {
+                       netdev_err(priv->dev, "no phy found\n");
+                       return -ENODEV;
+               }
+
                phydev = mdiobus_get_phy(priv->mii, addr);
                if (!phydev) {
                        netdev_err(priv->dev, "no phy at addr %d\n", addr);
index fc06ddeac0d534b98a64f64c7459450cf48f0baa..b4388ca8d211608d1b53901ecd838f5d274e5583 100644 (file)
@@ -210,7 +210,10 @@ static int stmmac_enable(struct ptp_clock_info *ptp,
                }
                writel(acr_value, ptpaddr + PTP_ACR);
                mutex_unlock(&priv->aux_ts_lock);
-               ret = 0;
+               /* wait for auxts fifo clear to finish */
+               ret = readl_poll_timeout(ptpaddr + PTP_ACR, acr_value,
+                                        !(acr_value & PTP_ACR_ATSFC),
+                                        10, 10000);
                break;
 
        default:
index 7552c400961eb7fb9aeed45c7365954fe81bc20d..b83390c48615801bfd7c041f236dc7d5ebf9d7d8 100644 (file)
@@ -357,7 +357,7 @@ static const struct ipa_mem ipa_mem_local_data[] = {
 static const struct ipa_mem_data ipa_mem_data = {
        .local_count    = ARRAY_SIZE(ipa_mem_local_data),
        .local          = ipa_mem_local_data,
-       .imem_addr      = 0x146a9000,
+       .imem_addr      = 0x146a8000,
        .imem_size      = 0x00002000,
        .smem_id        = 497,
        .smem_size      = 0x00009000,
index a49f66efacb8750a50f8467007d002897fac2bb0..c1b3977e1ae48463a76e6c1e9c694ba24bf49411 100644 (file)
@@ -127,15 +127,25 @@ out_power_put:
        return IRQ_HANDLED;
 }
 
+void ipa_interrupt_irq_disable(struct ipa *ipa)
+{
+       disable_irq(ipa->interrupt->irq);
+}
+
+void ipa_interrupt_irq_enable(struct ipa *ipa)
+{
+       enable_irq(ipa->interrupt->irq);
+}
+
 /* Common function used to enable/disable TX_SUSPEND for an endpoint */
 static void ipa_interrupt_suspend_control(struct ipa_interrupt *interrupt,
                                          u32 endpoint_id, bool enable)
 {
        struct ipa *ipa = interrupt->ipa;
+       u32 mask = BIT(endpoint_id % 32);
        u32 unit = endpoint_id / 32;
        const struct ipa_reg *reg;
        u32 offset;
-       u32 mask;
        u32 val;
 
        WARN_ON(!test_bit(endpoint_id, ipa->available));
@@ -148,7 +158,6 @@ static void ipa_interrupt_suspend_control(struct ipa_interrupt *interrupt,
        offset = ipa_reg_n_offset(reg, unit);
        val = ioread32(ipa->reg_virt + offset);
 
-       mask = BIT(endpoint_id);
        if (enable)
                val |= mask;
        else
index f31fd9965fdc678a38e5f1223d28233a773007e1..8a1bd5b893932badac38a6669a0fc48fc4da80b2 100644 (file)
@@ -85,6 +85,22 @@ void ipa_interrupt_suspend_clear_all(struct ipa_interrupt *interrupt);
  */
 void ipa_interrupt_simulate_suspend(struct ipa_interrupt *interrupt);
 
+/**
+ * ipa_interrupt_irq_enable() - Enable IPA interrupts
+ * @ipa:       IPA pointer
+ *
+ * This enables the IPA interrupt line
+ */
+void ipa_interrupt_irq_enable(struct ipa *ipa);
+
+/**
+ * ipa_interrupt_irq_disable() - Disable IPA interrupts
+ * @ipa:       IPA pointer
+ *
+ * This disables the IPA interrupt line
+ */
+void ipa_interrupt_irq_disable(struct ipa *ipa);
+
 /**
  * ipa_interrupt_config() - Configure the IPA interrupt framework
  * @ipa:       IPA pointer
index 8420f93128a268cce757f93dcaaf6a7979a3c0cd..8057be8cda8013457a8c4e83ede340eaff07bbf0 100644 (file)
@@ -181,6 +181,17 @@ static int ipa_suspend(struct device *dev)
 
        __set_bit(IPA_POWER_FLAG_SYSTEM, ipa->power->flags);
 
+       /* Increment the disable depth to ensure that the IRQ won't
+        * be re-enabled until the matching _enable call in
+        * ipa_resume(). We do this to ensure that the interrupt
+        * handler won't run whilst PM runtime is disabled.
+        *
+        * Note that disabling the IRQ is NOT the same as disabling
+        * irq wake. If wakeup is enabled for the IPA then the IRQ
+        * will still cause the system to wake up, see irq_set_irq_wake().
+        */
+       ipa_interrupt_irq_disable(ipa);
+
        return pm_runtime_force_suspend(dev);
 }
 
@@ -193,6 +204,12 @@ static int ipa_resume(struct device *dev)
 
        __clear_bit(IPA_POWER_FLAG_SYSTEM, ipa->power->flags);
 
+       /* Now that PM runtime is enabled again it's safe
+        * to turn the IRQ back on and process any data
+        * that was received during suspend.
+        */
+       ipa_interrupt_irq_enable(ipa);
+
        return ret;
 }
 
index 1cd604cd1fa1b15f2a8be344dcf7ba0443bd78c3..16e021b477f0636d630f8e342234e84bab0440cf 100644 (file)
@@ -108,7 +108,12 @@ EXPORT_SYMBOL(mdiobus_unregister_device);
 
 struct phy_device *mdiobus_get_phy(struct mii_bus *bus, int addr)
 {
-       struct mdio_device *mdiodev = bus->mdio_map[addr];
+       struct mdio_device *mdiodev;
+
+       if (addr < 0 || addr >= ARRAY_SIZE(bus->mdio_map))
+               return NULL;
+
+       mdiodev = bus->mdio_map[addr];
 
        if (!mdiodev)
                return NULL;
index 8dcb49ed1f3d9a13eab0153685484971cc4df07b..7fd9fe6a602bcb4c92b2cc2efa91fb8c266a5363 100644 (file)
@@ -105,6 +105,7 @@ static int xgmiitorgmii_probe(struct mdio_device *mdiodev)
 
        if (!priv->phy_dev->drv) {
                dev_info(dev, "Attached phy not ready\n");
+               put_device(&priv->phy_dev->mdio.dev);
                return -EPROBE_DEFER;
        }
 
index fcd43d62d86bcbc2b19567d83541de6248c3c4f8..d10606f257c43345e1d293dd462cdf77fc37497b 100644 (file)
@@ -1044,7 +1044,6 @@ static int team_port_enter(struct team *team, struct team_port *port)
                        goto err_port_enter;
                }
        }
-       port->dev->priv_flags |= IFF_NO_ADDRCONF;
 
        return 0;
 
@@ -1058,7 +1057,6 @@ static void team_port_leave(struct team *team, struct team_port *port)
 {
        if (team->ops.port_leave)
                team->ops.port_leave(team, port);
-       port->dev->priv_flags &= ~IFF_NO_ADDRCONF;
        dev_put(team->dev);
 }
 
index 8911cd2ed5343d23a02cd16387b1f9f0ccc93dfe..c140edb4b6482c753b501a14936305ac5e622c75 100644 (file)
@@ -1007,6 +1007,12 @@ static const struct usb_device_id        products[] = {
                                      USB_CDC_SUBCLASS_ETHERNET,
                                      USB_CDC_PROTO_NONE),
        .driver_info = (unsigned long)&wwan_info,
+}, {
+       /* Cinterion PLS62-W modem by GEMALTO/THALES */
+       USB_DEVICE_AND_INTERFACE_INFO(0x1e2d, 0x005b, USB_CLASS_COMM,
+                                     USB_CDC_SUBCLASS_ETHERNET,
+                                     USB_CDC_PROTO_NONE),
+       .driver_info = (unsigned long)&wwan_info,
 }, {
        /* Cinterion PLS83/PLS63 modem by GEMALTO/THALES */
        USB_DEVICE_AND_INTERFACE_INFO(0x1e2d, 0x0069, USB_CLASS_COMM,
index a481a1d831e2f4fe45562e76c5fd034c0d361ac1..23da1d9dafd1f32db5e6b8030deac2cb3697360a 100644 (file)
@@ -9836,6 +9836,7 @@ static const struct usb_device_id rtl8152_table[] = {
        REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x07ab),
        REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x07c6),
        REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x0927),
+       REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x0c5e),
        REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101),
        REALTEK_USB_DEVICE(VENDOR_ID_LENOVO,  0x304f),
        REALTEK_USB_DEVICE(VENDOR_ID_LENOVO,  0x3054),
index f79333fe17836c02d856cbf4ce942c9b05a6cbd9..7b3739b29c8f72b7b108c5f4ae11fdfcf243237d 100644 (file)
@@ -255,7 +255,8 @@ static int rndis_query(struct usbnet *dev, struct usb_interface *intf,
 
        off = le32_to_cpu(u.get_c->offset);
        len = le32_to_cpu(u.get_c->len);
-       if (unlikely((8 + off + len) > CONTROL_BUFFER_SIZE))
+       if (unlikely((off > CONTROL_BUFFER_SIZE - 8) ||
+                    (len > CONTROL_BUFFER_SIZE - 8 - off)))
                goto response_error;
 
        if (*reply_len != -1 && len != *reply_len)
index 5a53e63d33a60995ea8023474e664f6fc14017a5..3164451e1010cc80a67e2fddc89a9e59a6721cab 100644 (file)
@@ -413,7 +413,7 @@ static int sr9700_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
                /* ignore the CRC length */
                len = (skb->data[1] | (skb->data[2] << 8)) - 4;
 
-               if (len > ETH_FRAME_LEN || len > skb->len)
+               if (len > ETH_FRAME_LEN || len > skb->len || len < 0)
                        return 0;
 
                /* the last packet of current skb */
index ac7c0653695f04c38ec7ecdab833b3127982a904..dfc7d87fad59fed967bdbdb12440967471f6d52d 100644 (file)
@@ -974,6 +974,9 @@ static int veth_poll(struct napi_struct *napi, int budget)
        xdp_set_return_frame_no_direct();
        done = veth_xdp_rcv(rq, budget, &bq, &stats);
 
+       if (stats.xdp_redirect > 0)
+               xdp_do_flush();
+
        if (done < budget && napi_complete_done(napi, done)) {
                /* Write rx_notify_masked before reading ptr_ring */
                smp_store_mb(rq->rx_notify_masked, false);
@@ -987,8 +990,6 @@ static int veth_poll(struct napi_struct *napi, int budget)
 
        if (stats.xdp_tx > 0)
                veth_xdp_flush(rq, &bq);
-       if (stats.xdp_redirect > 0)
-               xdp_do_flush();
        xdp_clear_return_frame_no_direct();
 
        return done;
index 7723b2a49d8ecc84ff10568dd98732657f492882..18b3de854aeb9b50b5dc2d7acece66ab55dce824 100644 (file)
@@ -1877,8 +1877,10 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
         */
        if (sq->vq->num_free < 2+MAX_SKB_FRAGS) {
                netif_stop_subqueue(dev, qnum);
-               if (!use_napi &&
-                   unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
+               if (use_napi) {
+                       if (unlikely(!virtqueue_enable_cb_delayed(sq->vq)))
+                               virtqueue_napi_schedule(&sq->napi, sq->vq);
+               } else if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
                        /* More just got used, free them then recheck. */
                        free_old_xmit_skbs(sq, false);
                        if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
index 6f1e560fb15c4f10d42764182022b36bb8f573c4..56267c327f0b74da22a7a6130c3fb0e755e95a4f 100644 (file)
@@ -1288,6 +1288,10 @@ vmxnet3_rx_csum(struct vmxnet3_adapter *adapter,
                    (le32_to_cpu(gdesc->dword[3]) &
                     VMXNET3_RCD_CSUM_OK) == VMXNET3_RCD_CSUM_OK) {
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
+                       if ((le32_to_cpu(gdesc->dword[0]) &
+                                    (1UL << VMXNET3_RCD_HDR_INNER_SHIFT))) {
+                               skb->csum_level = 1;
+                       }
                        WARN_ON_ONCE(!(gdesc->rcd.tcp || gdesc->rcd.udp) &&
                                     !(le32_to_cpu(gdesc->dword[0]) &
                                     (1UL << VMXNET3_RCD_HDR_INNER_SHIFT)));
@@ -1297,6 +1301,10 @@ vmxnet3_rx_csum(struct vmxnet3_adapter *adapter,
                } else if (gdesc->rcd.v6 && (le32_to_cpu(gdesc->dword[3]) &
                                             (1 << VMXNET3_RCD_TUC_SHIFT))) {
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
+                       if ((le32_to_cpu(gdesc->dword[0]) &
+                                    (1UL << VMXNET3_RCD_HDR_INNER_SHIFT))) {
+                               skb->csum_level = 1;
+                       }
                        WARN_ON_ONCE(!(gdesc->rcd.tcp || gdesc->rcd.udp) &&
                                     !(le32_to_cpu(gdesc->dword[0]) &
                                     (1UL << VMXNET3_RCD_HDR_INNER_SHIFT)));
index 6b5a4d036d1533886654ff6de21a517bf9d9b4af..bdb3a76a352e459f1d302652c1a9c95c8f31fbe3 100644 (file)
@@ -1385,8 +1385,8 @@ static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
 
        /* loopback, multicast & non-ND link-local traffic; do not push through
         * packet taps again. Reset pkt_type for upper layers to process skb.
-        * For strict packets with a source LLA, determine the dst using the
-        * original ifindex.
+        * For non-loopback strict packets, determine the dst using the original
+        * ifindex.
         */
        if (skb->pkt_type == PACKET_LOOPBACK || (need_strict && !is_ndisc)) {
                skb->dev = vrf_dev;
@@ -1395,7 +1395,7 @@ static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
 
                if (skb->pkt_type == PACKET_LOOPBACK)
                        skb->pkt_type = PACKET_HOST;
-               else if (ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL)
+               else
                        vrf_ip6_input_dst(skb, vrf_dev, orig_iif);
 
                goto out;
index 92224b36787a83b6de5222c4aae2a9df324797a3..b1b179effe2a13e52f1bd3df386d1e339ea4fe68 100644 (file)
@@ -2917,16 +2917,23 @@ static int vxlan_init(struct net_device *dev)
                vxlan_vnigroup_init(vxlan);
 
        dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
-       if (!dev->tstats)
-               return -ENOMEM;
+       if (!dev->tstats) {
+               err = -ENOMEM;
+               goto err_vnigroup_uninit;
+       }
 
        err = gro_cells_init(&vxlan->gro_cells, dev);
-       if (err) {
-               free_percpu(dev->tstats);
-               return err;
-       }
+       if (err)
+               goto err_free_percpu;
 
        return 0;
+
+err_free_percpu:
+       free_percpu(dev->tstats);
+err_vnigroup_uninit:
+       if (vxlan->cfg.flags & VXLAN_F_VNIFILTER)
+               vxlan_vnigroup_uninit(vxlan);
+       return err;
 }
 
 static void vxlan_fdb_delete_default(struct vxlan_dev *vxlan, __be32 vni)
index 22edea6ca4b817b55bfcb7c0a25a8c5d23ceec5c..1c53b5546927052a336bcefabd6876c999f6f1c2 100644 (file)
@@ -1243,9 +1243,11 @@ static int ucc_hdlc_probe(struct platform_device *pdev)
 free_dev:
        free_netdev(dev);
 undo_uhdlc_init:
-       iounmap(utdm->siram);
+       if (utdm)
+               iounmap(utdm->siram);
 unmap_si_regs:
-       iounmap(utdm->si_regs);
+       if (utdm)
+               iounmap(utdm->si_regs);
 free_utdm:
        if (uhdlc_priv->tsa)
                kfree(utdm);
index 30f0765fb9fd8ff57d8f0e5b904cc2cec71fbe65..237f4ec2cffd7b0c53f8d1b253fe81072cf1ceab 100644 (file)
@@ -327,9 +327,9 @@ static inline struct ath9k_htc_tx_ctl *HTC_SKB_CB(struct sk_buff *skb)
 }
 
 #ifdef CONFIG_ATH9K_HTC_DEBUGFS
-#define __STAT_SAFE(hif_dev, expr)     ((hif_dev)->htc_handle->drv_priv ? (expr) : 0)
-#define CAB_STAT_INC(priv)             ((priv)->debug.tx_stats.cab_queued++)
-#define TX_QSTAT_INC(priv, q)          ((priv)->debug.tx_stats.queue_stats[q]++)
+#define __STAT_SAFE(hif_dev, expr)     do { ((hif_dev)->htc_handle->drv_priv ? (expr) : 0); } while (0)
+#define CAB_STAT_INC(priv)             do { ((priv)->debug.tx_stats.cab_queued++); } while (0)
+#define TX_QSTAT_INC(priv, q)          do { ((priv)->debug.tx_stats.queue_stats[q]++); } while (0)
 
 #define TX_STAT_INC(hif_dev, c) \
                __STAT_SAFE((hif_dev), (hif_dev)->htc_handle->drv_priv->debug.tx_stats.c++)
@@ -378,10 +378,10 @@ void ath9k_htc_get_et_stats(struct ieee80211_hw *hw,
                            struct ethtool_stats *stats, u64 *data);
 #else
 
-#define TX_STAT_INC(hif_dev, c)
-#define TX_STAT_ADD(hif_dev, c, a)
-#define RX_STAT_INC(hif_dev, c)
-#define RX_STAT_ADD(hif_dev, c, a)
+#define TX_STAT_INC(hif_dev, c)                do { } while (0)
+#define TX_STAT_ADD(hif_dev, c, a)     do { } while (0)
+#define RX_STAT_INC(hif_dev, c)                do { } while (0)
+#define RX_STAT_ADD(hif_dev, c, a)     do { } while (0)
 
 #define CAB_STAT_INC(priv)
 #define TX_QSTAT_INC(priv, c)
index bff3128c2f261c53a183bec1005d2a7a54b89980..b115902eb475e9ee4cb228f72ed6bdaab3fcfdcc 100644 (file)
@@ -7937,6 +7937,9 @@ cfg80211_set_channel(struct wiphy *wiphy, struct net_device *dev,
        struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
        struct brcmf_if *ifp = netdev_priv(cfg_to_ndev(cfg));
 
+       if (chan->flags & IEEE80211_CHAN_DISABLED)
+               return -EINVAL;
+
        /* set_channel */
        chspec = channel_to_chanspec(&cfg->d11inf, chan);
        if (chspec != INVCHANSPEC) {
@@ -7961,7 +7964,7 @@ brcmf_cfg80211_dump_survey(struct wiphy *wiphy, struct net_device *ndev,
        struct brcmf_if *ifp = netdev_priv(cfg_to_ndev(cfg));
        struct brcmf_dump_survey survey = {};
        struct ieee80211_supported_band *band;
-       struct ieee80211_channel *chan;
+       enum nl80211_band band_id;
        struct cca_msrmnt_query req;
        u32 noise;
        int err;
@@ -7974,26 +7977,25 @@ brcmf_cfg80211_dump_survey(struct wiphy *wiphy, struct net_device *ndev,
                return -EBUSY;
        }
 
-       band = wiphy->bands[NL80211_BAND_2GHZ];
-       if (band && idx >= band->n_channels) {
-               idx -= band->n_channels;
-               band = NULL;
-       }
+       for (band_id = 0; band_id < NUM_NL80211_BANDS; band_id++) {
+               band = wiphy->bands[band_id];
+               if (!band)
+                       continue;
+               if (idx >= band->n_channels) {
+                       idx -= band->n_channels;
+                       continue;
+               }
 
-       if (!band || idx >= band->n_channels) {
-               band = wiphy->bands[NL80211_BAND_5GHZ];
-               if (idx >= band->n_channels)
-                       return -ENOENT;
+               info->channel = &band->channels[idx];
+               break;
        }
+       if (band_id == NUM_NL80211_BANDS)
+               return -ENOENT;
 
        /* Setting current channel to the requested channel */
-       chan = &band->channels[idx];
-       err = cfg80211_set_channel(wiphy, ndev, chan, NL80211_CHAN_HT20);
-       if (err) {
-               info->channel = chan;
-               info->filled = 0;
+       info->filled = 0;
+       if (cfg80211_set_channel(wiphy, ndev, info->channel, NL80211_CHAN_HT20))
                return 0;
-       }
 
        /* Disable mpc */
        brcmf_set_mpc(ifp, 0);
@@ -8028,7 +8030,6 @@ brcmf_cfg80211_dump_survey(struct wiphy *wiphy, struct net_device *ndev,
        if (err)
                goto exit;
 
-       info->channel = chan;
        info->noise = noise;
        info->time = ACS_MSRMNT_DELAY;
        info->time_busy = ACS_MSRMNT_DELAY - survey.idle;
@@ -8040,7 +8041,7 @@ brcmf_cfg80211_dump_survey(struct wiphy *wiphy, struct net_device *ndev,
                SURVEY_INFO_TIME_TX;
 
        brcmf_dbg(INFO, "OBSS dump: channel %d: survey duration %d\n",
-                 ieee80211_frequency_to_channel(chan->center_freq),
+                 ieee80211_frequency_to_channel(info->channel->center_freq),
                  ACS_MSRMNT_DELAY);
        brcmf_dbg(INFO, "noise(%d) busy(%llu) rx(%llu) tx(%llu)\n",
                  info->noise, info->time_busy, info->time_rx, info->time_tx);
index a83699de01ec3c4ba5802241624be8ef6c706073..fdd0c9abc1a10dbf109ff68a9f92cec4e37401f2 100644 (file)
@@ -79,7 +79,8 @@ void brcmf_of_probe(struct device *dev, enum brcmf_bus_type bus_type,
        /* Apple ARM64 platforms have their own idea of board type, passed in
         * via the device tree. They also have an antenna SKU parameter
         */
-       if (!of_property_read_string(np, "brcm,board-type", &prop))
+       err = of_property_read_string(np, "brcm,board-type", &prop);
+       if (!err)
                settings->board_type = prop;
 
        if (!of_property_read_string(np, "apple,antenna-sku", &prop))
@@ -87,7 +88,7 @@ void brcmf_of_probe(struct device *dev, enum brcmf_bus_type bus_type,
 
        /* Set board-type to the first string of the machine compatible prop */
        root = of_find_node_by_path("/");
-       if (root && !settings->board_type) {
+       if (root && err) {
                char *board_type;
                const char *tmp;
 
index ae57a9a3ab055f943f2b3b9b6a7721877796bfee..b67f6d0810b6ccaa12bc6adddc99ebac568f6e0a 100644 (file)
@@ -1228,7 +1228,7 @@ static int brcmf_pcie_init_ringbuffers(struct brcmf_pciedev_info *devinfo)
                                BRCMF_NROF_H2D_COMMON_MSGRINGS;
                max_completionrings = BRCMF_NROF_D2H_COMMON_MSGRINGS;
        }
-       if (max_flowrings > 256) {
+       if (max_flowrings > 512) {
                brcmf_err(bus, "invalid max_flowrings(%d)\n", max_flowrings);
                return -EIO;
        }
index e6d64152c81a75e6d346c8278bc0590cab22a9fb..a02e5a67b7066b0498e0e4f81c9e41685675a6de 100644 (file)
@@ -1106,6 +1106,11 @@ int iwl_read_ppag_table(struct iwl_fw_runtime *fwrt, union iwl_ppag_table_cmd *c
         int i, j, num_sub_bands;
         s8 *gain;
 
+       /* many firmware images for JF lie about this */
+       if (CSR_HW_RFID_TYPE(fwrt->trans->hw_rf_id) ==
+           CSR_HW_RFID_TYPE(CSR_HW_RF_ID_TYPE_JF))
+               return -EOPNOTSUPP;
+
         if (!fw_has_capa(&fwrt->fw->ucode_capa, IWL_UCODE_TLV_CAPA_SET_PPAG)) {
                 IWL_DEBUG_RADIO(fwrt,
                                 "PPAG capability not supported by FW, command not sent.\n");
index f795548562f556d90d834e1b985d568b1aecd481..06161815c180edc6055e08835104aa4d1f259ff7 100644 (file)
@@ -205,6 +205,52 @@ mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q)
        mt76_dma_sync_idx(dev, q);
 }
 
+static int
+mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
+                   struct mt76_queue_buf *buf, void *data)
+{
+       struct mt76_desc *desc = &q->desc[q->head];
+       struct mt76_queue_entry *entry = &q->entry[q->head];
+       struct mt76_txwi_cache *txwi = NULL;
+       u32 buf1 = 0, ctrl;
+       int idx = q->head;
+       int rx_token;
+
+       ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);
+
+       if ((q->flags & MT_QFLAG_WED) &&
+           FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX) {
+               txwi = mt76_get_rxwi(dev);
+               if (!txwi)
+                       return -ENOMEM;
+
+               rx_token = mt76_rx_token_consume(dev, data, txwi, buf->addr);
+               if (rx_token < 0) {
+                       mt76_put_rxwi(dev, txwi);
+                       return -ENOMEM;
+               }
+
+               buf1 |= FIELD_PREP(MT_DMA_CTL_TOKEN, rx_token);
+               ctrl |= MT_DMA_CTL_TO_HOST;
+       }
+
+       WRITE_ONCE(desc->buf0, cpu_to_le32(buf->addr));
+       WRITE_ONCE(desc->buf1, cpu_to_le32(buf1));
+       WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));
+       WRITE_ONCE(desc->info, 0);
+
+       entry->dma_addr[0] = buf->addr;
+       entry->dma_len[0] = buf->len;
+       entry->txwi = txwi;
+       entry->buf = data;
+       entry->wcid = 0xffff;
+       entry->skip_buf1 = true;
+       q->head = (q->head + 1) % q->ndesc;
+       q->queued++;
+
+       return idx;
+}
+
 static int
 mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
                 struct mt76_queue_buf *buf, int nbufs, u32 info,
@@ -212,65 +258,51 @@ mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
 {
        struct mt76_queue_entry *entry;
        struct mt76_desc *desc;
-       u32 ctrl;
        int i, idx = -1;
+       u32 ctrl, next;
+
+       if (txwi) {
+               q->entry[q->head].txwi = DMA_DUMMY_DATA;
+               q->entry[q->head].skip_buf0 = true;
+       }
 
        for (i = 0; i < nbufs; i += 2, buf += 2) {
                u32 buf0 = buf[0].addr, buf1 = 0;
 
                idx = q->head;
-               q->head = (q->head + 1) % q->ndesc;
+               next = (q->head + 1) % q->ndesc;
 
                desc = &q->desc[idx];
                entry = &q->entry[idx];
 
-               if ((q->flags & MT_QFLAG_WED) &&
-                   FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX) {
-                       struct mt76_txwi_cache *t = txwi;
-                       int rx_token;
-
-                       if (!t)
-                               return -ENOMEM;
-
-                       rx_token = mt76_rx_token_consume(dev, (void *)skb, t,
-                                                        buf[0].addr);
-                       buf1 |= FIELD_PREP(MT_DMA_CTL_TOKEN, rx_token);
-                       ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len) |
-                              MT_DMA_CTL_TO_HOST;
-               } else {
-                       if (txwi) {
-                               q->entry[q->head].txwi = DMA_DUMMY_DATA;
-                               q->entry[q->head].skip_buf0 = true;
-                       }
-
-                       if (buf[0].skip_unmap)
-                               entry->skip_buf0 = true;
-                       entry->skip_buf1 = i == nbufs - 1;
-
-                       entry->dma_addr[0] = buf[0].addr;
-                       entry->dma_len[0] = buf[0].len;
-
-                       ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);
-                       if (i < nbufs - 1) {
-                               entry->dma_addr[1] = buf[1].addr;
-                               entry->dma_len[1] = buf[1].len;
-                               buf1 = buf[1].addr;
-                               ctrl |= FIELD_PREP(MT_DMA_CTL_SD_LEN1, buf[1].len);
-                               if (buf[1].skip_unmap)
-                                       entry->skip_buf1 = true;
-                       }
-
-                       if (i == nbufs - 1)
-                               ctrl |= MT_DMA_CTL_LAST_SEC0;
-                       else if (i == nbufs - 2)
-                               ctrl |= MT_DMA_CTL_LAST_SEC1;
+               if (buf[0].skip_unmap)
+                       entry->skip_buf0 = true;
+               entry->skip_buf1 = i == nbufs - 1;
+
+               entry->dma_addr[0] = buf[0].addr;
+               entry->dma_len[0] = buf[0].len;
+
+               ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);
+               if (i < nbufs - 1) {
+                       entry->dma_addr[1] = buf[1].addr;
+                       entry->dma_len[1] = buf[1].len;
+                       buf1 = buf[1].addr;
+                       ctrl |= FIELD_PREP(MT_DMA_CTL_SD_LEN1, buf[1].len);
+                       if (buf[1].skip_unmap)
+                               entry->skip_buf1 = true;
                }
 
+               if (i == nbufs - 1)
+                       ctrl |= MT_DMA_CTL_LAST_SEC0;
+               else if (i == nbufs - 2)
+                       ctrl |= MT_DMA_CTL_LAST_SEC1;
+
                WRITE_ONCE(desc->buf0, cpu_to_le32(buf0));
                WRITE_ONCE(desc->buf1, cpu_to_le32(buf1));
                WRITE_ONCE(desc->info, cpu_to_le32(info));
                WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));
 
+               q->head = next;
                q->queued++;
        }
 
@@ -577,17 +609,9 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
        spin_lock_bh(&q->lock);
 
        while (q->queued < q->ndesc - 1) {
-               struct mt76_txwi_cache *t = NULL;
                struct mt76_queue_buf qbuf;
                void *buf = NULL;
 
-               if ((q->flags & MT_QFLAG_WED) &&
-                   FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX) {
-                       t = mt76_get_rxwi(dev);
-                       if (!t)
-                               break;
-               }
-
                buf = page_frag_alloc(rx_page, q->buf_size, GFP_ATOMIC);
                if (!buf)
                        break;
@@ -601,7 +625,12 @@ mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
                qbuf.addr = addr + offset;
                qbuf.len = len - offset;
                qbuf.skip_unmap = false;
-               mt76_dma_add_buf(dev, q, &qbuf, 1, 0, buf, t);
+               if (mt76_dma_add_rx_buf(dev, q, &qbuf, buf) < 0) {
+                       dma_unmap_single(dev->dma_dev, addr, len,
+                                        DMA_FROM_DEVICE);
+                       skb_free_frag(buf);
+                       break;
+               }
                frames++;
        }
 
index 0a95c3da241b5051ec413f42e8b1562f571b40b2..8388e2a658535fbe5349369e020f68e8a53c8e24 100644 (file)
@@ -653,6 +653,13 @@ static u32 mt7915_mmio_wed_init_rx_buf(struct mtk_wed_device *wed, int size)
 
                desc->buf0 = cpu_to_le32(phy_addr);
                token = mt76_rx_token_consume(&dev->mt76, ptr, t, phy_addr);
+               if (token < 0) {
+                       dma_unmap_single(dev->mt76.dma_dev, phy_addr,
+                                        wed->wlan.rx_size, DMA_TO_DEVICE);
+                       skb_free_frag(ptr);
+                       goto unmap;
+               }
+
                desc->token |= cpu_to_le32(FIELD_PREP(MT_DMA_CTL_TOKEN,
                                                      token));
                desc++;
index 5c5fc569e6d59afef621e3fc0f4d991afe9a46d5..79fb47a73c91d01a16a0e4047d6d1971929208a8 100644 (file)
@@ -2,6 +2,7 @@
 config MT7996E
        tristate "MediaTek MT7996 (PCIe) support"
        select MT76_CONNAC_LIB
+       select RELAY
        depends on MAC80211
        depends on PCI
        help
index 24568b98ed9dfb0c9c04fa6492495290d7efb1c9..1f309d05380ad5fb6c266b6cc1e220bc691df986 100644 (file)
@@ -764,11 +764,12 @@ int mt76_rx_token_consume(struct mt76_dev *dev, void *ptr,
        spin_lock_bh(&dev->rx_token_lock);
        token = idr_alloc(&dev->rx_token, t, 0, dev->rx_token_size,
                          GFP_ATOMIC);
+       if (token >= 0) {
+               t->ptr = ptr;
+               t->dma_addr = phys;
+       }
        spin_unlock_bh(&dev->rx_token_lock);
 
-       t->ptr = ptr;
-       t->dma_addr = phys;
-
        return token;
 }
 EXPORT_SYMBOL_GPL(mt76_rx_token_consume);
index 82a7458e01aec063643ec83da31cb3a50ae96cd4..bf72e5fd39cf40a88c331726f5751eee232192db 100644 (file)
@@ -696,8 +696,8 @@ static int rndis_query_oid(struct usbnet *dev, u32 oid, void *data, int *len)
                struct rndis_query      *get;
                struct rndis_query_c    *get_c;
        } u;
-       int ret, buflen;
-       int resplen, respoffs, copylen;
+       int ret;
+       size_t buflen, resplen, respoffs, copylen;
 
        buflen = *len + sizeof(*u.get);
        if (buflen < CONTROL_BUFFER_SIZE)
@@ -732,22 +732,15 @@ static int rndis_query_oid(struct usbnet *dev, u32 oid, void *data, int *len)
 
                if (respoffs > buflen) {
                        /* Device returned data offset outside buffer, error. */
-                       netdev_dbg(dev->net, "%s(%s): received invalid "
-                               "data offset: %d > %d\n", __func__,
-                               oid_to_string(oid), respoffs, buflen);
+                       netdev_dbg(dev->net,
+                                  "%s(%s): received invalid data offset: %zu > %zu\n",
+                                  __func__, oid_to_string(oid), respoffs, buflen);
 
                        ret = -EINVAL;
                        goto exit_unlock;
                }
 
-               if ((resplen + respoffs) > buflen) {
-                       /* Device would have returned more data if buffer would
-                        * have been big enough. Copy just the bits that we got.
-                        */
-                       copylen = buflen - respoffs;
-               } else {
-                       copylen = resplen;
-               }
+               copylen = min(resplen, buflen - respoffs);
 
                if (copylen > *len)
                        copylen = *len;
index 0530dd744275c5d523effb5ade9c85a7a3992adf..05ee016594f8cfb7b1020d4ad6fe88153052f4d2 100644 (file)
@@ -3,6 +3,3 @@ obj-$(CONFIG_WLCORE)                    += wlcore/
 obj-$(CONFIG_WL12XX)                   += wl12xx/
 obj-$(CONFIG_WL1251)                   += wl1251/
 obj-$(CONFIG_WL18XX)                   += wl18xx/
-
-# small builtin driver bit
-obj-$(CONFIG_WILINK_PLATFORM_DATA)     += wilink_platform_data.o
index c1ba4294f364793f16397bca14ba6b75f5c8bd59..001636901ddae2e8afededa02d6b9533c7912b79 100644 (file)
@@ -977,7 +977,7 @@ static int read_xenbus_vif_flags(struct backend_info *be)
        return 0;
 }
 
-static int netback_remove(struct xenbus_device *dev)
+static void netback_remove(struct xenbus_device *dev)
 {
        struct backend_info *be = dev_get_drvdata(&dev->dev);
 
@@ -992,7 +992,6 @@ static int netback_remove(struct xenbus_device *dev)
        kfree(be->hotplug_script);
        kfree(be);
        dev_set_drvdata(&dev->dev, NULL);
-       return 0;
 }
 
 /*
index 14aec417fa063ee0c9e34394bbf90c261ba47c5e..12b074286df9daf8b83aa84cb62a2039d5d48019 100644 (file)
@@ -2646,7 +2646,7 @@ static void xennet_bus_close(struct xenbus_device *dev)
        } while (!ret);
 }
 
-static int xennet_remove(struct xenbus_device *dev)
+static void xennet_remove(struct xenbus_device *dev)
 {
        struct netfront_info *info = dev_get_drvdata(&dev->dev);
 
@@ -2662,8 +2662,6 @@ static int xennet_remove(struct xenbus_device *dev)
                rtnl_unlock();
        }
        xennet_free_netdev(info->netdev);
-
-       return 0;
 }
 
 static const struct xenbus_device_id netfront_ids[] = {
index 6f71ac72012ea5e6cfcbcb762d8f81bef6ecb7f0..ed9c5e2cf3ad43feaa590820202ec844ee96ed6c 100644 (file)
@@ -153,10 +153,17 @@ static int pn533_usb_send_ack(struct pn533 *dev, gfp_t flags)
        return usb_submit_urb(phy->ack_urb, flags);
 }
 
+struct pn533_out_arg {
+       struct pn533_usb_phy *phy;
+       struct completion done;
+};
+
 static int pn533_usb_send_frame(struct pn533 *dev,
                                struct sk_buff *out)
 {
        struct pn533_usb_phy *phy = dev->phy;
+       struct pn533_out_arg arg;
+       void *cntx;
        int rc;
 
        if (phy->priv == NULL)
@@ -168,10 +175,17 @@ static int pn533_usb_send_frame(struct pn533 *dev,
        print_hex_dump_debug("PN533 TX: ", DUMP_PREFIX_NONE, 16, 1,
                             out->data, out->len, false);
 
+       init_completion(&arg.done);
+       cntx = phy->out_urb->context;
+       phy->out_urb->context = &arg;
+
        rc = usb_submit_urb(phy->out_urb, GFP_KERNEL);
        if (rc)
                return rc;
 
+       wait_for_completion(&arg.done);
+       phy->out_urb->context = cntx;
+
        if (dev->protocol_type == PN533_PROTO_REQ_RESP) {
                /* request for response for sent packet directly */
                rc = pn533_submit_urb_for_response(phy, GFP_KERNEL);
@@ -408,7 +422,31 @@ static int pn533_acr122_poweron_rdr(struct pn533_usb_phy *phy)
        return arg.rc;
 }
 
-static void pn533_send_complete(struct urb *urb)
+static void pn533_out_complete(struct urb *urb)
+{
+       struct pn533_out_arg *arg = urb->context;
+       struct pn533_usb_phy *phy = arg->phy;
+
+       switch (urb->status) {
+       case 0:
+               break; /* success */
+       case -ECONNRESET:
+       case -ENOENT:
+               dev_dbg(&phy->udev->dev,
+                       "The urb has been stopped (status %d)\n",
+                       urb->status);
+               break;
+       case -ESHUTDOWN:
+       default:
+               nfc_err(&phy->udev->dev,
+                       "Urb failure (status %d)\n",
+                       urb->status);
+       }
+
+       complete(&arg->done);
+}
+
+static void pn533_ack_complete(struct urb *urb)
 {
        struct pn533_usb_phy *phy = urb->context;
 
@@ -496,10 +534,10 @@ static int pn533_usb_probe(struct usb_interface *interface,
 
        usb_fill_bulk_urb(phy->out_urb, phy->udev,
                          usb_sndbulkpipe(phy->udev, out_endpoint),
-                         NULL, 0, pn533_send_complete, phy);
+                         NULL, 0, pn533_out_complete, phy);
        usb_fill_bulk_urb(phy->ack_urb, phy->udev,
                          usb_sndbulkpipe(phy->udev, out_endpoint),
-                         NULL, 0, pn533_send_complete, phy);
+                         NULL, 0, pn533_ack_complete, phy);
 
        switch (id->driver_info) {
        case PN533_DEVICE_STD:
index e36aeb50b4edc1263654308f59610b28b04253e5..b317ce6c4ec31803347ff49d1bf9eec569c87150 100644 (file)
@@ -829,7 +829,23 @@ static void apple_nvme_disable(struct apple_nvme *anv, bool shutdown)
                        apple_nvme_remove_cq(anv);
                }
 
-               nvme_disable_ctrl(&anv->ctrl, shutdown);
+               /*
+                * Always disable the NVMe controller after shutdown.
+                * We need to do this to bring it back up later anyway, and we
+                * can't do it while the firmware is not running (e.g. in the
+                * resume reset path before RTKit is initialized), so for Apple
+                * controllers it makes sense to unconditionally do it here.
+                * Additionally, this sequence of events is reliable, while
+                * others (like disabling after bringing back the firmware on
+                * resume) seem to run into trouble under some circumstances.
+                *
+                * Both U-Boot and m1n1 also use this convention (i.e. an ANS
+                * NVMe controller is handed off with firmware shut down, in an
+                * NVMe disabled state, after a clean shutdown).
+                */
+               if (shutdown)
+                       nvme_disable_ctrl(&anv->ctrl, shutdown);
+               nvme_disable_ctrl(&anv->ctrl, false);
        }
 
        WRITE_ONCE(anv->ioq.enabled, false);
@@ -985,11 +1001,11 @@ static void apple_nvme_reset_work(struct work_struct *work)
                goto out;
        }
 
-       if (anv->ctrl.ctrl_config & NVME_CC_ENABLE)
-               apple_nvme_disable(anv, false);
-
        /* RTKit must be shut down cleanly for the (soft)-reset to work */
        if (apple_rtkit_is_running(anv->rtk)) {
+               /* reset the controller if it is enabled */
+               if (anv->ctrl.ctrl_config & NVME_CC_ENABLE)
+                       apple_nvme_disable(anv, false);
                dev_dbg(anv->dev, "Trying to shut down RTKit before reset.");
                ret = apple_rtkit_shutdown(anv->rtk);
                if (ret)
@@ -1493,7 +1509,7 @@ static int apple_nvme_probe(struct platform_device *pdev)
        }
 
        ret = nvme_init_ctrl(&anv->ctrl, anv->dev, &nvme_ctrl_ops,
-                            NVME_QUIRK_SKIP_CID_GEN);
+                            NVME_QUIRK_SKIP_CID_GEN | NVME_QUIRK_IDENTIFY_CNS);
        if (ret) {
                dev_err_probe(dev, ret, "Failed to initialize nvme_ctrl");
                goto put_dev;
index bb0abbe4491cdc1283ce63f2ffed398bc2f069cf..4424f53a8a0a3d1a87b573b43c7d580e42fe1db3 100644 (file)
@@ -953,7 +953,7 @@ int nvme_auth_init_ctrl(struct nvme_ctrl *ctrl)
                goto err_free_dhchap_secret;
 
        if (!ctrl->opts->dhchap_secret && !ctrl->opts->dhchap_ctrl_secret)
-               return ret;
+               return 0;
 
        ctrl->dhchap_ctxs = kvcalloc(ctrl_max_dhchaps(ctrl),
                                sizeof(*chap), GFP_KERNEL);
index 95c488ea91c3035bf67871627b090f3c94020592..7be562a4e1aa72d6278f657a24730afabb47aee8 100644 (file)
@@ -1074,6 +1074,18 @@ static u32 nvme_known_admin_effects(u8 opcode)
        return 0;
 }
 
+static u32 nvme_known_nvm_effects(u8 opcode)
+{
+       switch (opcode) {
+       case nvme_cmd_write:
+       case nvme_cmd_write_zeroes:
+       case nvme_cmd_write_uncor:
+                return NVME_CMD_EFFECTS_LBCC;
+       default:
+               return 0;
+       }
+}
+
 u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode)
 {
        u32 effects = 0;
@@ -1081,16 +1093,24 @@ u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns, u8 opcode)
        if (ns) {
                if (ns->head->effects)
                        effects = le32_to_cpu(ns->head->effects->iocs[opcode]);
+               if (ns->head->ids.csi == NVME_CAP_CSS_NVM)
+                       effects |= nvme_known_nvm_effects(opcode);
                if (effects & ~(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC))
                        dev_warn_once(ctrl->device,
-                               "IO command:%02x has unhandled effects:%08x\n",
+                               "IO command:%02x has unusual effects:%08x\n",
                                opcode, effects);
-               return 0;
-       }
 
-       if (ctrl->effects)
-               effects = le32_to_cpu(ctrl->effects->acs[opcode]);
-       effects |= nvme_known_admin_effects(opcode);
+               /*
+                * NVME_CMD_EFFECTS_CSE_MASK causes a freeze all I/O queues,
+                * which would deadlock when done on an I/O command.  Note that
+                * We already warn about an unusual effect above.
+                */
+               effects &= ~NVME_CMD_EFFECTS_CSE_MASK;
+       } else {
+               if (ctrl->effects)
+                       effects = le32_to_cpu(ctrl->effects->acs[opcode]);
+               effects |= nvme_known_admin_effects(opcode);
+       }
 
        return effects;
 }
@@ -4926,7 +4946,7 @@ int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
 
        memset(set, 0, sizeof(*set));
        set->ops = ops;
-       set->queue_depth = ctrl->sqsize + 1;
+       set->queue_depth = min_t(unsigned, ctrl->sqsize, BLK_MQ_MAX_DEPTH - 1);
        /*
         * Some Apple controllers requires tags to be unique across admin and
         * the (only) I/O queue, so reserve the first 32 tags of the I/O queue.
index 9ddda571f0461f2d699669a385f2d8a907ce927e..06f52db34be9bd8234ad34e63f1c26590d1b2193 100644 (file)
@@ -8,12 +8,26 @@
 #include <linux/io_uring.h>
 #include "nvme.h"
 
+enum {
+       NVME_IOCTL_VEC          = (1 << 0),
+       NVME_IOCTL_PARTITION    = (1 << 1),
+};
+
 static bool nvme_cmd_allowed(struct nvme_ns *ns, struct nvme_command *c,
-               fmode_t mode)
+               unsigned int flags, fmode_t mode)
 {
+       u32 effects;
+
        if (capable(CAP_SYS_ADMIN))
                return true;
 
+       /*
+        * Do not allow unprivileged passthrough on partitions, as that allows an
+        * escape from the containment of the partition.
+        */
+       if (flags & NVME_IOCTL_PARTITION)
+               return false;
+
        /*
         * Do not allow unprivileged processes to send vendor specific or fabrics
         * commands as we can't be sure about their effects.
@@ -43,11 +57,29 @@ static bool nvme_cmd_allowed(struct nvme_ns *ns, struct nvme_command *c,
        }
 
        /*
-        * Only allow I/O commands that transfer data to the controller if the
-        * special file is open for writing, but always allow I/O commands that
-        * transfer data from the controller.
+        * Check if the controller provides a Commands Supported and Effects log
+        * and marks this command as supported.  If not reject unprivileged
+        * passthrough.
+        */
+       effects = nvme_command_effects(ns->ctrl, ns, c->common.opcode);
+       if (!(effects & NVME_CMD_EFFECTS_CSUPP))
+               return false;
+
+       /*
+        * Don't allow passthrough for command that have intrusive (or unknown)
+        * effects.
+        */
+       if (effects & ~(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC |
+                       NVME_CMD_EFFECTS_UUID_SEL |
+                       NVME_CMD_EFFECTS_SCOPE_MASK))
+               return false;
+
+       /*
+        * Only allow I/O commands that transfer data to the controller or that
+        * change the logical block contents if the file descriptor is open for
+        * writing.
         */
-       if (nvme_is_write(c))
+       if (nvme_is_write(c) || (effects & NVME_CMD_EFFECTS_LBCC))
                return mode & FMODE_WRITE;
        return true;
 }
@@ -130,7 +162,7 @@ static struct request *nvme_alloc_user_request(struct request_queue *q,
 static int nvme_map_user_request(struct request *req, u64 ubuffer,
                unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
                u32 meta_seed, void **metap, struct io_uring_cmd *ioucmd,
-               bool vec)
+               unsigned int flags)
 {
        struct request_queue *q = req->q;
        struct nvme_ns *ns = q->queuedata;
@@ -143,7 +175,7 @@ static int nvme_map_user_request(struct request *req, u64 ubuffer,
                struct iov_iter iter;
 
                /* fixedbufs is only for non-vectored io */
-               if (WARN_ON_ONCE(vec))
+               if (WARN_ON_ONCE(flags & NVME_IOCTL_VEC))
                        return -EINVAL;
                ret = io_uring_cmd_import_fixed(ubuffer, bufflen,
                                rq_data_dir(req), &iter, ioucmd);
@@ -152,8 +184,8 @@ static int nvme_map_user_request(struct request *req, u64 ubuffer,
                ret = blk_rq_map_user_iov(q, req, NULL, &iter, GFP_KERNEL);
        } else {
                ret = blk_rq_map_user_io(req, NULL, nvme_to_user_ptr(ubuffer),
-                               bufflen, GFP_KERNEL, vec, 0, 0,
-                               rq_data_dir(req));
+                               bufflen, GFP_KERNEL, flags & NVME_IOCTL_VEC, 0,
+                               0, rq_data_dir(req));
        }
 
        if (ret)
@@ -183,9 +215,9 @@ out:
 }
 
 static int nvme_submit_user_cmd(struct request_queue *q,
-               struct nvme_command *cmd, u64 ubuffer,
-               unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
-               u32 meta_seed, u64 *result, unsigned timeout, bool vec)
+               struct nvme_command *cmd, u64 ubuffer, unsigned bufflen,
+               void __user *meta_buffer, unsigned meta_len, u32 meta_seed,
+               u64 *result, unsigned timeout, unsigned int flags)
 {
        struct nvme_ctrl *ctrl;
        struct request *req;
@@ -201,7 +233,7 @@ static int nvme_submit_user_cmd(struct request_queue *q,
        req->timeout = timeout;
        if (ubuffer && bufflen) {
                ret = nvme_map_user_request(req, ubuffer, bufflen, meta_buffer,
-                               meta_len, meta_seed, &meta, NULL, vec);
+                               meta_len, meta_seed, &meta, NULL, flags);
                if (ret)
                        return ret;
        }
@@ -284,10 +316,8 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
        c.rw.apptag = cpu_to_le16(io.apptag);
        c.rw.appmask = cpu_to_le16(io.appmask);
 
-       return nvme_submit_user_cmd(ns->queue, &c,
-                       io.addr, length,
-                       metadata, meta_len, lower_32_bits(io.slba), NULL, 0,
-                       false);
+       return nvme_submit_user_cmd(ns->queue, &c, io.addr, length, metadata,
+                       meta_len, lower_32_bits(io.slba), NULL, 0, 0);
 }
 
 static bool nvme_validate_passthru_nsid(struct nvme_ctrl *ctrl,
@@ -305,7 +335,8 @@ static bool nvme_validate_passthru_nsid(struct nvme_ctrl *ctrl,
 }
 
 static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
-                       struct nvme_passthru_cmd __user *ucmd, fmode_t mode)
+               struct nvme_passthru_cmd __user *ucmd, unsigned int flags,
+               fmode_t mode)
 {
        struct nvme_passthru_cmd cmd;
        struct nvme_command c;
@@ -333,16 +364,15 @@ static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
        c.common.cdw14 = cpu_to_le32(cmd.cdw14);
        c.common.cdw15 = cpu_to_le32(cmd.cdw15);
 
-       if (!nvme_cmd_allowed(ns, &c, mode))
+       if (!nvme_cmd_allowed(ns, &c, 0, mode))
                return -EACCES;
 
        if (cmd.timeout_ms)
                timeout = msecs_to_jiffies(cmd.timeout_ms);
 
        status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
-                       cmd.addr, cmd.data_len,
-                       nvme_to_user_ptr(cmd.metadata), cmd.metadata_len,
-                       0, &result, timeout, false);
+                       cmd.addr, cmd.data_len, nvme_to_user_ptr(cmd.metadata),
+                       cmd.metadata_len, 0, &result, timeout, 0);
 
        if (status >= 0) {
                if (put_user(result, &ucmd->result))
@@ -353,8 +383,8 @@ static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
 }
 
 static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
-                       struct nvme_passthru_cmd64 __user *ucmd, bool vec,
-                       fmode_t mode)
+               struct nvme_passthru_cmd64 __user *ucmd, unsigned int flags,
+               fmode_t mode)
 {
        struct nvme_passthru_cmd64 cmd;
        struct nvme_command c;
@@ -381,16 +411,15 @@ static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
        c.common.cdw14 = cpu_to_le32(cmd.cdw14);
        c.common.cdw15 = cpu_to_le32(cmd.cdw15);
 
-       if (!nvme_cmd_allowed(ns, &c, mode))
+       if (!nvme_cmd_allowed(ns, &c, flags, mode))
                return -EACCES;
 
        if (cmd.timeout_ms)
                timeout = msecs_to_jiffies(cmd.timeout_ms);
 
        status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
-                       cmd.addr, cmd.data_len,
-                       nvme_to_user_ptr(cmd.metadata), cmd.metadata_len,
-                       0, &cmd.result, timeout, vec);
+                       cmd.addr, cmd.data_len, nvme_to_user_ptr(cmd.metadata),
+                       cmd.metadata_len, 0, &cmd.result, timeout, flags);
 
        if (status >= 0) {
                if (put_user(cmd.result, &ucmd->result))
@@ -551,7 +580,7 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
        c.common.cdw14 = cpu_to_le32(READ_ONCE(cmd->cdw14));
        c.common.cdw15 = cpu_to_le32(READ_ONCE(cmd->cdw15));
 
-       if (!nvme_cmd_allowed(ns, &c, ioucmd->file->f_mode))
+       if (!nvme_cmd_allowed(ns, &c, 0, ioucmd->file->f_mode))
                return -EACCES;
 
        d.metadata = READ_ONCE(cmd->metadata);
@@ -621,9 +650,9 @@ static int nvme_ctrl_ioctl(struct nvme_ctrl *ctrl, unsigned int cmd,
 {
        switch (cmd) {
        case NVME_IOCTL_ADMIN_CMD:
-               return nvme_user_cmd(ctrl, NULL, argp, mode);
+               return nvme_user_cmd(ctrl, NULL, argp, 0, mode);
        case NVME_IOCTL_ADMIN64_CMD:
-               return nvme_user_cmd64(ctrl, NULL, argp, false, mode);
+               return nvme_user_cmd64(ctrl, NULL, argp, 0, mode);
        default:
                return sed_ioctl(ctrl->opal_dev, cmd, argp);
        }
@@ -648,14 +677,14 @@ struct nvme_user_io32 {
 #endif /* COMPAT_FOR_U64_ALIGNMENT */
 
 static int nvme_ns_ioctl(struct nvme_ns *ns, unsigned int cmd,
-               void __user *argp, fmode_t mode)
+               void __user *argp, unsigned int flags, fmode_t mode)
 {
        switch (cmd) {
        case NVME_IOCTL_ID:
                force_successful_syscall_return();
                return ns->head->ns_id;
        case NVME_IOCTL_IO_CMD:
-               return nvme_user_cmd(ns->ctrl, ns, argp, mode);
+               return nvme_user_cmd(ns->ctrl, ns, argp, flags, mode);
        /*
         * struct nvme_user_io can have different padding on some 32-bit ABIs.
         * Just accept the compat version as all fields that are used are the
@@ -666,37 +695,40 @@ static int nvme_ns_ioctl(struct nvme_ns *ns, unsigned int cmd,
 #endif
        case NVME_IOCTL_SUBMIT_IO:
                return nvme_submit_io(ns, argp);
-       case NVME_IOCTL_IO64_CMD:
-               return nvme_user_cmd64(ns->ctrl, ns, argp, false, mode);
        case NVME_IOCTL_IO64_CMD_VEC:
-               return nvme_user_cmd64(ns->ctrl, ns, argp, true, mode);
+               flags |= NVME_IOCTL_VEC;
+               fallthrough;
+       case NVME_IOCTL_IO64_CMD:
+               return nvme_user_cmd64(ns->ctrl, ns, argp, flags, mode);
        default:
                return -ENOTTY;
        }
 }
 
-static int __nvme_ioctl(struct nvme_ns *ns, unsigned int cmd, void __user *arg,
-                       fmode_t mode)
-{
-       if (is_ctrl_ioctl(cmd))
-               return nvme_ctrl_ioctl(ns->ctrl, cmd, arg, mode);
-       return nvme_ns_ioctl(ns, cmd, arg, mode);
-}
-
 int nvme_ioctl(struct block_device *bdev, fmode_t mode,
                unsigned int cmd, unsigned long arg)
 {
        struct nvme_ns *ns = bdev->bd_disk->private_data;
+       void __user *argp = (void __user *)arg;
+       unsigned int flags = 0;
+
+       if (bdev_is_partition(bdev))
+               flags |= NVME_IOCTL_PARTITION;
 
-       return __nvme_ioctl(ns, cmd, (void __user *)arg, mode);
+       if (is_ctrl_ioctl(cmd))
+               return nvme_ctrl_ioctl(ns->ctrl, cmd, argp, mode);
+       return nvme_ns_ioctl(ns, cmd, argp, flags, mode);
 }
 
 long nvme_ns_chr_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 {
        struct nvme_ns *ns =
                container_of(file_inode(file)->i_cdev, struct nvme_ns, cdev);
+       void __user *argp = (void __user *)arg;
 
-       return __nvme_ioctl(ns, cmd, (void __user *)arg, file->f_mode);
+       if (is_ctrl_ioctl(cmd))
+               return nvme_ctrl_ioctl(ns->ctrl, cmd, argp, file->f_mode);
+       return nvme_ns_ioctl(ns, cmd, argp, 0, file->f_mode);
 }
 
 static int nvme_uring_cmd_checks(unsigned int issue_flags)
@@ -786,6 +818,10 @@ int nvme_ns_head_ioctl(struct block_device *bdev, fmode_t mode,
        void __user *argp = (void __user *)arg;
        struct nvme_ns *ns;
        int srcu_idx, ret = -EWOULDBLOCK;
+       unsigned int flags = 0;
+
+       if (bdev_is_partition(bdev))
+               flags |= NVME_IOCTL_PARTITION;
 
        srcu_idx = srcu_read_lock(&head->srcu);
        ns = nvme_find_path(head);
@@ -801,7 +837,7 @@ int nvme_ns_head_ioctl(struct block_device *bdev, fmode_t mode,
                return nvme_ns_head_ctrl_ioctl(ns, cmd, argp, head, srcu_idx,
                                        mode);
 
-       ret = nvme_ns_ioctl(ns, cmd, argp, mode);
+       ret = nvme_ns_ioctl(ns, cmd, argp, flags, mode);
 out_unlock:
        srcu_read_unlock(&head->srcu, srcu_idx);
        return ret;
@@ -826,7 +862,7 @@ long nvme_ns_head_chr_ioctl(struct file *file, unsigned int cmd,
                return nvme_ns_head_ctrl_ioctl(ns, cmd, argp, head, srcu_idx,
                                file->f_mode);
 
-       ret = nvme_ns_ioctl(ns, cmd, argp, file->f_mode);
+       ret = nvme_ns_ioctl(ns, cmd, argp, 0, file->f_mode);
 out_unlock:
        srcu_read_unlock(&head->srcu, srcu_idx);
        return ret;
@@ -925,7 +961,7 @@ static int nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp,
        kref_get(&ns->kref);
        up_read(&ctrl->namespaces_rwsem);
 
-       ret = nvme_user_cmd(ctrl, ns, argp, mode);
+       ret = nvme_user_cmd(ctrl, ns, argp, 0, mode);
        nvme_put_ns(ns);
        return ret;
 
@@ -942,9 +978,9 @@ long nvme_dev_ioctl(struct file *file, unsigned int cmd,
 
        switch (cmd) {
        case NVME_IOCTL_ADMIN_CMD:
-               return nvme_user_cmd(ctrl, NULL, argp, file->f_mode);
+               return nvme_user_cmd(ctrl, NULL, argp, 0, file->f_mode);
        case NVME_IOCTL_ADMIN64_CMD:
-               return nvme_user_cmd64(ctrl, NULL, argp, false, file->f_mode);
+               return nvme_user_cmd64(ctrl, NULL, argp, 0, file->f_mode);
        case NVME_IOCTL_IO_CMD:
                return nvme_dev_user_cmd(ctrl, argp, file->f_mode);
        case NVME_IOCTL_RESET:
index c03093b6813c599dd5fde84d410438e70ae95a43..fc39d01e7b63be8a4f9e794f915715cbc2c8609b 100644 (file)
@@ -376,6 +376,8 @@ static void nvme_ns_head_submit_bio(struct bio *bio)
         * pool from the original queue to allocate the bvecs from.
         */
        bio = bio_split_to_limits(bio);
+       if (!bio)
+               return;
 
        srcu_idx = srcu_read_lock(&head->srcu);
        ns = nvme_find_path(head);
index 6bbb73ef8b2548f6a5a1647e76a007334c66fe23..424c8a467a0c2a83e3b4d12c1e5daef51fe664c5 100644 (file)
@@ -893,7 +893,7 @@ static inline void nvme_trace_bio_complete(struct request *req)
 {
        struct nvme_ns *ns = req->q->queuedata;
 
-       if (req->cmd_flags & REQ_NVME_MPATH)
+       if ((req->cmd_flags & REQ_NVME_MPATH) && req->bio)
                trace_block_bio_complete(ns->head->disk->queue, req->bio);
 }
 
index f0f8027644bbf8f0a1099e688a8e41498d592d71..1ff8843bc4b36a982bb771b8cf01341d78b3b686 100644 (file)
@@ -36,7 +36,7 @@
 #define SQ_SIZE(q)     ((q)->q_depth << (q)->sqes)
 #define CQ_SIZE(q)     ((q)->q_depth * sizeof(struct nvme_completion))
 
-#define SGES_PER_PAGE  (PAGE_SIZE / sizeof(struct nvme_sgl_desc))
+#define SGES_PER_PAGE  (NVME_CTRL_PAGE_SIZE / sizeof(struct nvme_sgl_desc))
 
 /*
  * These can be higher, but we need to ensure that any command doesn't
@@ -144,9 +144,9 @@ struct nvme_dev {
        mempool_t *iod_mempool;
 
        /* shadow doorbell buffer support: */
-       u32 *dbbuf_dbs;
+       __le32 *dbbuf_dbs;
        dma_addr_t dbbuf_dbs_dma_addr;
-       u32 *dbbuf_eis;
+       __le32 *dbbuf_eis;
        dma_addr_t dbbuf_eis_dma_addr;
 
        /* host memory buffer support: */
@@ -208,10 +208,10 @@ struct nvme_queue {
 #define NVMEQ_SQ_CMB           1
 #define NVMEQ_DELETE_ERROR     2
 #define NVMEQ_POLLED           3
-       u32 *dbbuf_sq_db;
-       u32 *dbbuf_cq_db;
-       u32 *dbbuf_sq_ei;
-       u32 *dbbuf_cq_ei;
+       __le32 *dbbuf_sq_db;
+       __le32 *dbbuf_cq_db;
+       __le32 *dbbuf_sq_ei;
+       __le32 *dbbuf_cq_ei;
        struct completion delete_done;
 };
 
@@ -343,11 +343,11 @@ static inline int nvme_dbbuf_need_event(u16 event_idx, u16 new_idx, u16 old)
 }
 
 /* Update dbbuf and return true if an MMIO is required */
-static bool nvme_dbbuf_update_and_check_event(u16 value, u32 *dbbuf_db,
-                                             volatile u32 *dbbuf_ei)
+static bool nvme_dbbuf_update_and_check_event(u16 value, __le32 *dbbuf_db,
+                                             volatile __le32 *dbbuf_ei)
 {
        if (dbbuf_db) {
-               u16 old_value;
+               u16 old_value, event_idx;
 
                /*
                 * Ensure that the queue is written before updating
@@ -355,8 +355,8 @@ static bool nvme_dbbuf_update_and_check_event(u16 value, u32 *dbbuf_db,
                 */
                wmb();
 
-               old_value = *dbbuf_db;
-               *dbbuf_db = value;
+               old_value = le32_to_cpu(*dbbuf_db);
+               *dbbuf_db = cpu_to_le32(value);
 
                /*
                 * Ensure that the doorbell is updated before reading the event
@@ -366,7 +366,8 @@ static bool nvme_dbbuf_update_and_check_event(u16 value, u32 *dbbuf_db,
                 */
                mb();
 
-               if (!nvme_dbbuf_need_event(*dbbuf_ei, value, old_value))
+               event_idx = le32_to_cpu(*dbbuf_ei);
+               if (!nvme_dbbuf_need_event(event_idx, value, old_value))
                        return false;
        }
 
@@ -380,9 +381,9 @@ static bool nvme_dbbuf_update_and_check_event(u16 value, u32 *dbbuf_db,
  */
 static int nvme_pci_npages_prp(void)
 {
-       unsigned nprps = DIV_ROUND_UP(NVME_MAX_KB_SZ + NVME_CTRL_PAGE_SIZE,
-                                     NVME_CTRL_PAGE_SIZE);
-       return DIV_ROUND_UP(8 * nprps, PAGE_SIZE - 8);
+       unsigned max_bytes = (NVME_MAX_KB_SZ * 1024) + NVME_CTRL_PAGE_SIZE;
+       unsigned nprps = DIV_ROUND_UP(max_bytes, NVME_CTRL_PAGE_SIZE);
+       return DIV_ROUND_UP(8 * nprps, NVME_CTRL_PAGE_SIZE - 8);
 }
 
 /*
@@ -392,7 +393,7 @@ static int nvme_pci_npages_prp(void)
 static int nvme_pci_npages_sgl(void)
 {
        return DIV_ROUND_UP(NVME_MAX_SEGS * sizeof(struct nvme_sgl_desc),
-                       PAGE_SIZE);
+                       NVME_CTRL_PAGE_SIZE);
 }
 
 static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
@@ -708,7 +709,7 @@ static void nvme_pci_sgl_set_seg(struct nvme_sgl_desc *sge,
                sge->length = cpu_to_le32(entries * sizeof(*sge));
                sge->type = NVME_SGL_FMT_LAST_SEG_DESC << 4;
        } else {
-               sge->length = cpu_to_le32(PAGE_SIZE);
+               sge->length = cpu_to_le32(NVME_CTRL_PAGE_SIZE);
                sge->type = NVME_SGL_FMT_SEG_DESC << 4;
        }
 }
@@ -1361,7 +1362,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req)
        else
                nvme_poll_irqdisable(nvmeq);
 
-       if (blk_mq_request_completed(req)) {
+       if (blk_mq_rq_state(req) != MQ_RQ_IN_FLIGHT) {
                dev_warn(dev->ctrl.device,
                         "I/O %d QID %d timeout, completion polled\n",
                         req->tag, nvmeq->qid);
@@ -2332,10 +2333,12 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
        if (dev->cmb_use_sqes) {
                result = nvme_cmb_qdepth(dev, nr_io_queues,
                                sizeof(struct nvme_command));
-               if (result > 0)
+               if (result > 0) {
                        dev->q_depth = result;
-               else
+                       dev->ctrl.sqsize = result - 1;
+               } else {
                        dev->cmb_use_sqes = false;
+               }
        }
 
        do {
@@ -2530,13 +2533,12 @@ static int nvme_pci_enable(struct nvme_dev *dev)
         */
        result = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
        if (result < 0)
-               return result;
+               goto disable;
 
        dev->ctrl.cap = lo_hi_readq(dev->bar + NVME_REG_CAP);
 
        dev->q_depth = min_t(u32, NVME_CAP_MQES(dev->ctrl.cap) + 1,
                                io_queue_depth);
-       dev->ctrl.sqsize = dev->q_depth - 1; /* 0's based queue depth */
        dev->db_stride = 1 << NVME_CAP_STRIDE(dev->ctrl.cap);
        dev->dbs = dev->bar + 4096;
 
@@ -2577,15 +2579,20 @@ static int nvme_pci_enable(struct nvme_dev *dev)
                dev_warn(dev->ctrl.device, "IO queue depth clamped to %d\n",
                         dev->q_depth);
        }
-
+       dev->ctrl.sqsize = dev->q_depth - 1; /* 0's based queue depth */
 
        nvme_map_cmb(dev);
 
        pci_enable_pcie_error_reporting(pdev);
        pci_save_state(pdev);
 
-       return nvme_pci_configure_admin_queue(dev);
+       result = nvme_pci_configure_admin_queue(dev);
+       if (result)
+               goto free_irq;
+       return result;
 
+ free_irq:
+       pci_free_irq_vectors(pdev);
  disable:
        pci_disable_device(pdev);
        return result;
@@ -3493,7 +3500,8 @@ static const struct pci_device_id nvme_id_table[] = {
                .driver_data = NVME_QUIRK_SINGLE_VECTOR |
                                NVME_QUIRK_128_BYTES_SQES |
                                NVME_QUIRK_SHARED_TAGS |
-                               NVME_QUIRK_SKIP_CID_GEN },
+                               NVME_QUIRK_SKIP_CID_GEN |
+                               NVME_QUIRK_IDENTIFY_CNS },
        { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
        { 0, }
 };
index 53a004ea320c1ff1a04eefb2169bcb253c55c020..6a54ed6fb1214404d7738bf32c7b14d2441ee6d6 100644 (file)
@@ -164,26 +164,31 @@ out:
 
 static void nvmet_get_cmd_effects_nvm(struct nvme_effects_log *log)
 {
-       log->acs[nvme_admin_get_log_page]       = cpu_to_le32(1 << 0);
-       log->acs[nvme_admin_identify]           = cpu_to_le32(1 << 0);
-       log->acs[nvme_admin_abort_cmd]          = cpu_to_le32(1 << 0);
-       log->acs[nvme_admin_set_features]       = cpu_to_le32(1 << 0);
-       log->acs[nvme_admin_get_features]       = cpu_to_le32(1 << 0);
-       log->acs[nvme_admin_async_event]        = cpu_to_le32(1 << 0);
-       log->acs[nvme_admin_keep_alive]         = cpu_to_le32(1 << 0);
-
-       log->iocs[nvme_cmd_read]                = cpu_to_le32(1 << 0);
-       log->iocs[nvme_cmd_write]               = cpu_to_le32(1 << 0);
-       log->iocs[nvme_cmd_flush]               = cpu_to_le32(1 << 0);
-       log->iocs[nvme_cmd_dsm]                 = cpu_to_le32(1 << 0);
-       log->iocs[nvme_cmd_write_zeroes]        = cpu_to_le32(1 << 0);
+       log->acs[nvme_admin_get_log_page] =
+       log->acs[nvme_admin_identify] =
+       log->acs[nvme_admin_abort_cmd] =
+       log->acs[nvme_admin_set_features] =
+       log->acs[nvme_admin_get_features] =
+       log->acs[nvme_admin_async_event] =
+       log->acs[nvme_admin_keep_alive] =
+               cpu_to_le32(NVME_CMD_EFFECTS_CSUPP);
+
+       log->iocs[nvme_cmd_read] =
+       log->iocs[nvme_cmd_flush] =
+       log->iocs[nvme_cmd_dsm] =
+               cpu_to_le32(NVME_CMD_EFFECTS_CSUPP);
+       log->iocs[nvme_cmd_write] =
+       log->iocs[nvme_cmd_write_zeroes] =
+               cpu_to_le32(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC);
 }
 
 static void nvmet_get_cmd_effects_zns(struct nvme_effects_log *log)
 {
-       log->iocs[nvme_cmd_zone_append]         = cpu_to_le32(1 << 0);
-       log->iocs[nvme_cmd_zone_mgmt_send]      = cpu_to_le32(1 << 0);
-       log->iocs[nvme_cmd_zone_mgmt_recv]      = cpu_to_le32(1 << 0);
+       log->iocs[nvme_cmd_zone_append] =
+       log->iocs[nvme_cmd_zone_mgmt_send] =
+               cpu_to_le32(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC);
+       log->iocs[nvme_cmd_zone_mgmt_recv] =
+               cpu_to_le32(NVME_CMD_EFFECTS_CSUPP);
 }
 
 static void nvmet_execute_get_log_cmd_effects_ns(struct nvmet_req *req)
index 79af5140af8bfe5cd10c0a3a7bc32fdaeae631ca..adc0958755d66f738f41fc588e2c359f0949bf69 100644 (file)
@@ -334,14 +334,13 @@ static void nvmet_passthru_execute_cmd(struct nvmet_req *req)
        }
 
        /*
-        * If there are effects for the command we are about to execute, or
-        * an end_req function we need to use nvme_execute_passthru_rq()
-        * synchronously in a work item seeing the end_req function and
-        * nvme_passthru_end() can't be called in the request done callback
-        * which is typically in interrupt context.
+        * If a command needs post-execution fixups, or there are any
+        * non-trivial effects, make sure to execute the command synchronously
+        * in a workqueue so that nvme_passthru_end gets called.
         */
        effects = nvme_command_effects(ctrl, ns, req->cmd->common.opcode);
-       if (req->p.use_workqueue || effects) {
+       if (req->p.use_workqueue ||
+           (effects & ~(NVME_CMD_EFFECTS_CSUPP | NVME_CMD_EFFECTS_LBCC))) {
                INIT_WORK(&req->p.work, nvmet_passthru_execute_cmd_work);
                req->p.rq = rq;
                queue_work(nvmet_wq, &req->p.work);
index b2272bccf85c95ac68c24eb530c743fe21d1223e..f08b25195ae79d2b51468dd89d6d3e3c20b93f2d 100644 (file)
@@ -1099,7 +1099,7 @@ u64 __init dt_mem_next_cell(int s, const __be32 **cellp)
  */
 int __init early_init_dt_scan_memory(void)
 {
-       int node;
+       int node, found_memory = 0;
        const void *fdt = initial_boot_params;
 
        fdt_for_each_subnode(node, fdt, 0) {
@@ -1139,6 +1139,8 @@ int __init early_init_dt_scan_memory(void)
 
                        early_init_dt_add_memory_arch(base, size);
 
+                       found_memory = 1;
+
                        if (!hotpluggable)
                                continue;
 
@@ -1147,7 +1149,7 @@ int __init early_init_dt_scan_memory(void)
                                        base, base + size);
                }
        }
-       return 0;
+       return found_memory;
 }
 
 int __init early_init_dt_scan_chosen(char *cmdline)
@@ -1161,18 +1163,14 @@ int __init early_init_dt_scan_chosen(char *cmdline)
        if (node < 0)
                node = fdt_path_offset(fdt, "/chosen@0");
        if (node < 0)
-               return -ENOENT;
+               /* Handle the cmdline config options even if no /chosen node */
+               goto handle_cmdline;
 
        chosen_node_offset = node;
 
        early_init_dt_check_for_initrd(node);
        early_init_dt_check_for_elfcorehdr(node);
 
-       /* Retrieve command line */
-       p = of_get_flat_dt_prop(node, "bootargs", &l);
-       if (p != NULL && l > 0)
-               strscpy(cmdline, p, min(l, COMMAND_LINE_SIZE));
-
        rng_seed = of_get_flat_dt_prop(node, "rng-seed", &l);
        if (rng_seed && l > 0) {
                add_bootloader_randomness(rng_seed, l);
@@ -1185,6 +1183,32 @@ int __init early_init_dt_scan_chosen(char *cmdline)
                                fdt_totalsize(initial_boot_params));
        }
 
+       /* Retrieve command line */
+       p = of_get_flat_dt_prop(node, "bootargs", &l);
+       if (p != NULL && l > 0)
+               strscpy(cmdline, p, min(l, COMMAND_LINE_SIZE));
+
+handle_cmdline:
+       /*
+        * CONFIG_CMDLINE is meant to be a default in case nothing else
+        * managed to set the command line, unless CONFIG_CMDLINE_FORCE
+        * is set in which case we override whatever was found earlier.
+        */
+#ifdef CONFIG_CMDLINE
+#if defined(CONFIG_CMDLINE_EXTEND)
+       strlcat(cmdline, " ", COMMAND_LINE_SIZE);
+       strlcat(cmdline, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
+#elif defined(CONFIG_CMDLINE_FORCE)
+       strscpy(cmdline, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
+#else
+       /* No arguments from boot loader, use kernel's  cmdl*/
+       if (!((char *)cmdline)[0])
+               strscpy(cmdline, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
+#endif
+#endif /* CONFIG_CMDLINE */
+
+       pr_debug("Command line is: %s\n", (char *)cmdline);
+
        return 0;
 }
 
@@ -1277,26 +1301,6 @@ void __init early_init_dt_scan_nodes(void)
        if (rc)
                pr_warn("No chosen node found, continuing without\n");
 
-       /*
-        * CONFIG_CMDLINE is meant to be a default in case nothing else
-        * managed to set the command line, unless CONFIG_CMDLINE_FORCE
-        * is set in which case we override whatever was found earlier.
-        */
-#ifdef CONFIG_CMDLINE
-#if defined(CONFIG_CMDLINE_EXTEND)
-       strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
-       strlcat(boot_command_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
-#elif defined(CONFIG_CMDLINE_FORCE)
-       strscpy(boot_command_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
-#else
-       /* No arguments from boot loader, use kernel's cmdl */
-       if (!boot_command_line[0])
-               strscpy(boot_command_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
-#endif
-#endif /* CONFIG_CMDLINE */
-
-       pr_debug("Command line is: %s\n", boot_command_line);
-
        /* Setup memory, calling early_init_dt_add_memory_arch */
        early_init_dt_scan_memory();
 
index a0d2713f0e8896ef3936b0774707604ab1e1203b..99ec91e2a5cfa3ae67a12a09c002bc06c80d7846 100644 (file)
@@ -225,7 +225,7 @@ config PCIE_ARTPEC6_EP
 config PCIE_BT1
        tristate "Baikal-T1 PCIe controller"
        depends on MIPS_BAIKAL_T1 || COMPILE_TEST
-       depends on PCI_MSI_IRQ_DOMAIN
+       depends on PCI_MSI
        select PCIE_DW_HOST
        help
          Enables support for the PCIe controller in the Baikal-T1 SoC to work
index 7378e2f3e525fd1290cc482e8329a74d635685b5..fcd029ca2eb184a9e4ee48773c1dd1e906be8fd1 100644 (file)
@@ -1055,14 +1055,12 @@ out:
        return err;
 }
 
-static int pcifront_xenbus_remove(struct xenbus_device *xdev)
+static void pcifront_xenbus_remove(struct xenbus_device *xdev)
 {
        struct pcifront_device *pdev = dev_get_drvdata(&xdev->dev);
 
        if (pdev)
                free_pdev(pdev);
-
-       return 0;
 }
 
 static const struct xenbus_device_id xenpci_ids[] = {
index 7585e8080b77d58247aa94f5020f2aa98bcc9c30..afc63552ecaf7994738f9cfad53ef852fde9aa3b 100644 (file)
@@ -255,7 +255,7 @@ static int imx8_pcie_phy_probe(struct platform_device *pdev)
                imx8_phy->perst =
                        devm_reset_control_get_exclusive(dev, "perst");
                if (IS_ERR(imx8_phy->perst))
-                       dev_err_probe(dev, PTR_ERR(imx8_phy->perst),
+                       return dev_err_probe(dev, PTR_ERR(imx8_phy->perst),
                                      "Failed to get PCIE PHY PERST control\n");
        }
 
index 95c6dbb52da720bffcba85ff9ad037daed688009..ce511ad5d36907631c8c44a730bfe04fd8b66c57 100644 (file)
@@ -99,6 +99,7 @@ static int can_transceiver_phy_probe(struct platform_device *pdev)
        struct gpio_desc *standby_gpio;
        struct gpio_desc *enable_gpio;
        u32 max_bitrate = 0;
+       int err;
 
        can_transceiver_phy = devm_kzalloc(dev, sizeof(struct can_transceiver_phy), GFP_KERNEL);
        if (!can_transceiver_phy)
@@ -124,8 +125,8 @@ static int can_transceiver_phy_probe(struct platform_device *pdev)
                return PTR_ERR(phy);
        }
 
-       device_property_read_u32(dev, "max-bitrate", &max_bitrate);
-       if (!max_bitrate)
+       err = device_property_read_u32(dev, "max-bitrate", &max_bitrate);
+       if ((err != -EINVAL) && !max_bitrate)
                dev_warn(dev, "Invalid value for transceiver max bitrate. Ignoring bitrate limit\n");
        phy->attrs.max_link_rate = max_bitrate;
 
index 8807e59a1162cb05591546e8d4fd06ea3ca6dfff..a52a9bf13b758d99c95786a8e9e8d1cbe2fe15bf 100644 (file)
@@ -401,26 +401,13 @@ static const struct hsphy_init_seq init_seq_femtophy[] = {
        HSPHY_INIT_CFG(0x90, 0x60, 0),
 };
 
-static const struct hsphy_init_seq init_seq_mdm9607[] = {
-       HSPHY_INIT_CFG(0x80, 0x44, 0),
-       HSPHY_INIT_CFG(0x81, 0x38, 0),
-       HSPHY_INIT_CFG(0x82, 0x24, 0),
-       HSPHY_INIT_CFG(0x83, 0x13, 0),
-};
-
 static const struct hsphy_data hsphy_data_femtophy = {
        .init_seq = init_seq_femtophy,
        .init_seq_num = ARRAY_SIZE(init_seq_femtophy),
 };
 
-static const struct hsphy_data hsphy_data_mdm9607 = {
-       .init_seq = init_seq_mdm9607,
-       .init_seq_num = ARRAY_SIZE(init_seq_mdm9607),
-};
-
 static const struct of_device_id qcom_snps_hsphy_match[] = {
        { .compatible = "qcom,usb-hs-28nm-femtophy", .data = &hsphy_data_femtophy, },
-       { .compatible = "qcom,usb-hs-28nm-mdm9607", .data = &hsphy_data_mdm9607, },
        { },
 };
 MODULE_DEVICE_TABLE(of, qcom_snps_hsphy_match);
index ec6594e6dc275bf1c7fac5888736ee5263b0be91..e7588a940d699129e8644b922ab6c1fd688b2427 100644 (file)
@@ -126,7 +126,7 @@ r8a779f0_eth_serdes_chan_setting(struct r8a779f0_eth_serdes_channel *channel)
                r8a779f0_eth_serdes_write32(channel->addr, 0x0160, 0x180, 0x0007);
                r8a779f0_eth_serdes_write32(channel->addr, 0x01ac, 0x180, 0x0000);
                r8a779f0_eth_serdes_write32(channel->addr, 0x00c4, 0x180, 0x0310);
-               r8a779f0_eth_serdes_write32(channel->addr, 0x00c8, 0x380, 0x0101);
+               r8a779f0_eth_serdes_write32(channel->addr, 0x00c8, 0x180, 0x0101);
                ret = r8a779f0_eth_serdes_reg_wait(channel, 0x00c8, 0x0180, BIT(0), 0);
                if (ret)
                        return ret;
index e6ededc515239086a7b2e38ffd0a453f6ec88ba6..a0bc10aa79618350caf846a2bbce0cb1ea5d0039 100644 (file)
@@ -485,8 +485,10 @@ static int rockchip_usb2phy_power_on(struct phy *phy)
                return ret;
 
        ret = property_enable(base, &rport->port_cfg->phy_sus, false);
-       if (ret)
+       if (ret) {
+               clk_disable_unprepare(rphy->clk480m);
                return ret;
+       }
 
        /* waiting for the utmi_clk to become stable */
        usleep_range(1500, 2000);
index e827b79f6d493cb9c1571221d7940e16741b0a2a..56de41091d639721fd797491c04684339d4eb64d 100644 (file)
@@ -254,6 +254,9 @@ static int sp_usb_phy_probe(struct platform_device *pdev)
                return PTR_ERR(usbphy->phy_regs);
 
        usbphy->moon4_res_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "moon4");
+       if (!usbphy->moon4_res_mem)
+               return -EINVAL;
+
        usbphy->moon4_regs = devm_ioremap(&pdev->dev, usbphy->moon4_res_mem->start,
                                          resource_size(usbphy->moon4_res_mem));
        if (!usbphy->moon4_regs)
index 15a3bcf32308603597a9ae5d177c2091e20a6dcd..b905902d57508d0c6f9d05fae9b0d9576d2cf0d9 100644 (file)
@@ -23,7 +23,7 @@ config PHY_DM816X_USB
 
 config PHY_AM654_SERDES
        tristate "TI AM654 SERDES support"
-       depends on OF && ARCH_K3 || COMPILE_TEST
+       depends on OF && (ARCH_K3 || COMPILE_TEST)
        depends on COMMON_CLK
        select GENERIC_PHY
        select MULTIPLEXER
@@ -35,7 +35,7 @@ config PHY_AM654_SERDES
 
 config PHY_J721E_WIZ
        tristate "TI J721E WIZ (SERDES Wrapper) support"
-       depends on OF && ARCH_K3 || COMPILE_TEST
+       depends on OF && (ARCH_K3 || COMPILE_TEST)
        depends on HAS_IOMEM && OF_ADDRESS
        depends on COMMON_CLK
        select GENERIC_PHY
index 3106a21cd277fe11eb32ff8006c8e38d20be3ca5..d7b244df058f28200d21132c889d95abf1150cd1 100644 (file)
@@ -6,9 +6,10 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/gpio/driver.h>
 #include <linux/pinctrl/pinctrl.h>
+
 #include <linux/mfd/abx500/ab8500.h>
+
 #include "pinctrl-abx500.h"
 
 /* All the pins that can be used for GPIO and some other functions */
index b93af1fb37f017f45d200dd1604ae42f2acd7da4..45aa958b573e0af81fc3872f30215a7ecd61f25c 100644 (file)
@@ -6,9 +6,10 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/gpio/driver.h>
 #include <linux/pinctrl/pinctrl.h>
+
 #include <linux/mfd/abx500/ab8500.h>
+
 #include "pinctrl-abx500.h"
 
 /* All the pins that can be used for GPIO and some other functions */
index 7aa534576a459840042dd243a333e84aef1ce4a2..28c3403df1b0387215b3e106e7eca92bd722cd08 100644 (file)
@@ -6,33 +6,37 @@
  *
  * Driver allows to use AxB5xx unused pins to be used as GPIO
  */
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/slab.h>
-#include <linux/init.h>
+#include <linux/bitops.h>
 #include <linux/err.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/platform_device.h>
 #include <linux/gpio/driver.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
 #include <linux/irq.h>
 #include <linux/irqdomain.h>
-#include <linux/interrupt.h>
-#include <linux/bitops.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
 #include <linux/mfd/abx500.h>
 #include <linux/mfd/abx500/ab8500.h>
-#include <linux/pinctrl/pinctrl.h>
+
 #include <linux/pinctrl/consumer.h>
-#include <linux/pinctrl/pinmux.h>
-#include <linux/pinctrl/pinconf.h>
-#include <linux/pinctrl/pinconf-generic.h>
 #include <linux/pinctrl/machine.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
 
-#include "pinctrl-abx500.h"
 #include "../core.h"
 #include "../pinconf.h"
 #include "../pinctrl-utils.h"
 
+#include "pinctrl-abx500.h"
+
 /*
  * GPIO registers offset
  * Bank: 0x10
index 90bb12fe8073fd3d1110a8f1f0496deb3031bdb1..d675220846752608e0eab08dbaa406d5fe3b5d2e 100644 (file)
@@ -2,6 +2,10 @@
 #ifndef PINCTRL_PINCTRL_ABx500_H
 #define PINCTRL_PINCTRL_ABx500_H
 
+#include <linux/types.h>
+
+struct pinctrl_pin_desc;
+
 /* Package definitions */
 #define PINCTRL_AB8500 0
 #define PINCTRL_AB8505 1
index 758d21f0a8503a5d4c1548b2d06cae52d34acb06..490e0959e8be6c27e584dd9c81ce839cfba2118d 100644 (file)
@@ -1,6 +1,9 @@
 // SPDX-License-Identifier: GPL-2.0
 #include <linux/kernel.h>
+#include <linux/types.h>
+
 #include <linux/pinctrl/pinctrl.h>
+
 #include "pinctrl-nomadik.h"
 
 /* All the pins that can be used for GPIO and some other functions */
index c0d7c86d09391035b6d3d9f8cde690af9f2afb4c..1552222ac68e74482dcfb660ac38064e6e12ecc8 100644 (file)
@@ -1,6 +1,9 @@
 // SPDX-License-Identifier: GPL-2.0
 #include <linux/kernel.h>
+#include <linux/types.h>
+
 #include <linux/pinctrl/pinctrl.h>
+
 #include "pinctrl-nomadik.h"
 
 /* All the pins that can be used for GPIO and some other functions */
index f7d02513d8cc103f98cdc5752dd0abb17df93020..86a638077a697391136bb9057bd306c77c5b63f3 100644 (file)
@@ -7,30 +7,34 @@
  *   Rewritten based on work by Prafulla WADASKAR <prafulla.wadaskar@st.com>
  * Copyright (C) 2011-2013 Linus Walleij <linus.walleij@linaro.org>
  */
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/device.h>
-#include <linux/platform_device.h>
-#include <linux/io.h>
+#include <linux/bitops.h>
 #include <linux/clk.h>
+#include <linux/device.h>
 #include <linux/err.h>
 #include <linux/gpio/driver.h>
-#include <linux/spinlock.h>
+#include <linux/init.h>
 #include <linux/interrupt.h>
-#include <linux/slab.h>
-#include <linux/of_device.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
 #include <linux/of_address.h>
-#include <linux/bitops.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+/* Since we request GPIOs from ourself */
+#include <linux/pinctrl/consumer.h>
 #include <linux/pinctrl/machine.h>
+#include <linux/pinctrl/pinconf.h>
 #include <linux/pinctrl/pinctrl.h>
 #include <linux/pinctrl/pinmux.h>
-#include <linux/pinctrl/pinconf.h>
-/* Since we request GPIOs from ourself */
-#include <linux/pinctrl/consumer.h>
-#include "pinctrl-nomadik.h"
+
 #include "../core.h"
 #include "../pinctrl-utils.h"
 
+#include "pinctrl-nomadik.h"
+
 /*
  * The GPIO module in the Nomadik family of Systems-on-Chip is an
  * AMBA device, managing 32 pins and alternate functions.  The logic block
@@ -907,8 +911,6 @@ static int nmk_gpio_get_mode(struct nmk_gpio_chip *nmk_chip, int offset)
        return (afunc ? NMK_GPIO_ALT_A : 0) | (bfunc ? NMK_GPIO_ALT_B : 0);
 }
 
-#include <linux/seq_file.h>
-
 static void nmk_gpio_dbg_show_one(struct seq_file *s,
        struct pinctrl_dev *pctldev, struct gpio_chip *chip,
        unsigned offset, unsigned gpio)
index 84e297757335756a8a6e5efc298b397e1ee0a707..1ef2559bc5710bdcb5aeec0c9de81a24d334955d 100644 (file)
@@ -2,6 +2,11 @@
 #ifndef PINCTRL_PINCTRL_NOMADIK_H
 #define PINCTRL_PINCTRL_NOMADIK_H
 
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+#include <linux/pinctrl/pinctrl.h>
+
 /* Package definitions */
 #define PINCTRL_NMK_STN8815    0
 #define PINCTRL_NMK_DB8500     1
index da974ff2d75d0f9898603e7fe7ae3167ef384217..5eeac92f610a058dc8fa60ab59692f72080f26fb 100644 (file)
@@ -926,19 +926,19 @@ static struct rockchip_mux_route_data rk3568_mux_route_data[] = {
        RK_MUXROUTE_PMU(0, RK_PB5, 4, 0x0110, WRITE_MASK_VAL(3, 2, 1)), /* PWM1 IO mux M1 */
        RK_MUXROUTE_PMU(0, RK_PC1, 1, 0x0110, WRITE_MASK_VAL(5, 4, 0)), /* PWM2 IO mux M0 */
        RK_MUXROUTE_PMU(0, RK_PB6, 4, 0x0110, WRITE_MASK_VAL(5, 4, 1)), /* PWM2 IO mux M1 */
-       RK_MUXROUTE_PMU(0, RK_PB3, 2, 0x0300, WRITE_MASK_VAL(0, 0, 0)), /* CAN0 IO mux M0 */
+       RK_MUXROUTE_GRF(0, RK_PB3, 2, 0x0300, WRITE_MASK_VAL(0, 0, 0)), /* CAN0 IO mux M0 */
        RK_MUXROUTE_GRF(2, RK_PA1, 4, 0x0300, WRITE_MASK_VAL(0, 0, 1)), /* CAN0 IO mux M1 */
        RK_MUXROUTE_GRF(1, RK_PA1, 3, 0x0300, WRITE_MASK_VAL(2, 2, 0)), /* CAN1 IO mux M0 */
        RK_MUXROUTE_GRF(4, RK_PC3, 3, 0x0300, WRITE_MASK_VAL(2, 2, 1)), /* CAN1 IO mux M1 */
        RK_MUXROUTE_GRF(4, RK_PB5, 3, 0x0300, WRITE_MASK_VAL(4, 4, 0)), /* CAN2 IO mux M0 */
        RK_MUXROUTE_GRF(2, RK_PB2, 4, 0x0300, WRITE_MASK_VAL(4, 4, 1)), /* CAN2 IO mux M1 */
        RK_MUXROUTE_GRF(4, RK_PC4, 1, 0x0300, WRITE_MASK_VAL(6, 6, 0)), /* HPDIN IO mux M0 */
-       RK_MUXROUTE_PMU(0, RK_PC2, 2, 0x0300, WRITE_MASK_VAL(6, 6, 1)), /* HPDIN IO mux M1 */
+       RK_MUXROUTE_GRF(0, RK_PC2, 2, 0x0300, WRITE_MASK_VAL(6, 6, 1)), /* HPDIN IO mux M1 */
        RK_MUXROUTE_GRF(3, RK_PB1, 3, 0x0300, WRITE_MASK_VAL(8, 8, 0)), /* GMAC1 IO mux M0 */
        RK_MUXROUTE_GRF(4, RK_PA7, 3, 0x0300, WRITE_MASK_VAL(8, 8, 1)), /* GMAC1 IO mux M1 */
        RK_MUXROUTE_GRF(4, RK_PD1, 1, 0x0300, WRITE_MASK_VAL(10, 10, 0)), /* HDMITX IO mux M0 */
-       RK_MUXROUTE_PMU(0, RK_PC7, 1, 0x0300, WRITE_MASK_VAL(10, 10, 1)), /* HDMITX IO mux M1 */
-       RK_MUXROUTE_PMU(0, RK_PB6, 1, 0x0300, WRITE_MASK_VAL(14, 14, 0)), /* I2C2 IO mux M0 */
+       RK_MUXROUTE_GRF(0, RK_PC7, 1, 0x0300, WRITE_MASK_VAL(10, 10, 1)), /* HDMITX IO mux M1 */
+       RK_MUXROUTE_GRF(0, RK_PB6, 1, 0x0300, WRITE_MASK_VAL(14, 14, 0)), /* I2C2 IO mux M0 */
        RK_MUXROUTE_GRF(4, RK_PB4, 1, 0x0300, WRITE_MASK_VAL(14, 14, 1)), /* I2C2 IO mux M1 */
        RK_MUXROUTE_GRF(1, RK_PA0, 1, 0x0304, WRITE_MASK_VAL(0, 0, 0)), /* I2C3 IO mux M0 */
        RK_MUXROUTE_GRF(3, RK_PB6, 4, 0x0304, WRITE_MASK_VAL(0, 0, 1)), /* I2C3 IO mux M1 */
@@ -964,7 +964,7 @@ static struct rockchip_mux_route_data rk3568_mux_route_data[] = {
        RK_MUXROUTE_GRF(4, RK_PC3, 1, 0x0308, WRITE_MASK_VAL(12, 12, 1)), /* PWM15 IO mux M1 */
        RK_MUXROUTE_GRF(3, RK_PD2, 3, 0x0308, WRITE_MASK_VAL(14, 14, 0)), /* SDMMC2 IO mux M0 */
        RK_MUXROUTE_GRF(3, RK_PA5, 5, 0x0308, WRITE_MASK_VAL(14, 14, 1)), /* SDMMC2 IO mux M1 */
-       RK_MUXROUTE_PMU(0, RK_PB5, 2, 0x030c, WRITE_MASK_VAL(0, 0, 0)), /* SPI0 IO mux M0 */
+       RK_MUXROUTE_GRF(0, RK_PB5, 2, 0x030c, WRITE_MASK_VAL(0, 0, 0)), /* SPI0 IO mux M0 */
        RK_MUXROUTE_GRF(2, RK_PD3, 3, 0x030c, WRITE_MASK_VAL(0, 0, 1)), /* SPI0 IO mux M1 */
        RK_MUXROUTE_GRF(2, RK_PB5, 3, 0x030c, WRITE_MASK_VAL(2, 2, 0)), /* SPI1 IO mux M0 */
        RK_MUXROUTE_GRF(3, RK_PC3, 3, 0x030c, WRITE_MASK_VAL(2, 2, 1)), /* SPI1 IO mux M1 */
@@ -973,8 +973,8 @@ static struct rockchip_mux_route_data rk3568_mux_route_data[] = {
        RK_MUXROUTE_GRF(4, RK_PB3, 4, 0x030c, WRITE_MASK_VAL(6, 6, 0)), /* SPI3 IO mux M0 */
        RK_MUXROUTE_GRF(4, RK_PC2, 2, 0x030c, WRITE_MASK_VAL(6, 6, 1)), /* SPI3 IO mux M1 */
        RK_MUXROUTE_GRF(2, RK_PB4, 2, 0x030c, WRITE_MASK_VAL(8, 8, 0)), /* UART1 IO mux M0 */
-       RK_MUXROUTE_PMU(0, RK_PD1, 1, 0x030c, WRITE_MASK_VAL(8, 8, 1)), /* UART1 IO mux M1 */
-       RK_MUXROUTE_PMU(0, RK_PD1, 1, 0x030c, WRITE_MASK_VAL(10, 10, 0)), /* UART2 IO mux M0 */
+       RK_MUXROUTE_GRF(3, RK_PD6, 4, 0x030c, WRITE_MASK_VAL(8, 8, 1)), /* UART1 IO mux M1 */
+       RK_MUXROUTE_GRF(0, RK_PD1, 1, 0x030c, WRITE_MASK_VAL(10, 10, 0)), /* UART2 IO mux M0 */
        RK_MUXROUTE_GRF(1, RK_PD5, 2, 0x030c, WRITE_MASK_VAL(10, 10, 1)), /* UART2 IO mux M1 */
        RK_MUXROUTE_GRF(1, RK_PA1, 2, 0x030c, WRITE_MASK_VAL(12, 12, 0)), /* UART3 IO mux M0 */
        RK_MUXROUTE_GRF(3, RK_PB7, 4, 0x030c, WRITE_MASK_VAL(12, 12, 1)), /* UART3 IO mux M1 */
@@ -1004,13 +1004,13 @@ static struct rockchip_mux_route_data rk3568_mux_route_data[] = {
        RK_MUXROUTE_GRF(3, RK_PD6, 5, 0x0314, WRITE_MASK_VAL(1, 0, 1)), /* PDM IO mux M1 */
        RK_MUXROUTE_GRF(4, RK_PA0, 4, 0x0314, WRITE_MASK_VAL(1, 0, 1)), /* PDM IO mux M1 */
        RK_MUXROUTE_GRF(3, RK_PC4, 5, 0x0314, WRITE_MASK_VAL(1, 0, 2)), /* PDM IO mux M2 */
-       RK_MUXROUTE_PMU(0, RK_PA5, 3, 0x0314, WRITE_MASK_VAL(3, 2, 0)), /* PCIE20 IO mux M0 */
+       RK_MUXROUTE_GRF(0, RK_PA5, 3, 0x0314, WRITE_MASK_VAL(3, 2, 0)), /* PCIE20 IO mux M0 */
        RK_MUXROUTE_GRF(2, RK_PD0, 4, 0x0314, WRITE_MASK_VAL(3, 2, 1)), /* PCIE20 IO mux M1 */
        RK_MUXROUTE_GRF(1, RK_PB0, 4, 0x0314, WRITE_MASK_VAL(3, 2, 2)), /* PCIE20 IO mux M2 */
-       RK_MUXROUTE_PMU(0, RK_PA4, 3, 0x0314, WRITE_MASK_VAL(5, 4, 0)), /* PCIE30X1 IO mux M0 */
+       RK_MUXROUTE_GRF(0, RK_PA4, 3, 0x0314, WRITE_MASK_VAL(5, 4, 0)), /* PCIE30X1 IO mux M0 */
        RK_MUXROUTE_GRF(2, RK_PD2, 4, 0x0314, WRITE_MASK_VAL(5, 4, 1)), /* PCIE30X1 IO mux M1 */
        RK_MUXROUTE_GRF(1, RK_PA5, 4, 0x0314, WRITE_MASK_VAL(5, 4, 2)), /* PCIE30X1 IO mux M2 */
-       RK_MUXROUTE_PMU(0, RK_PA6, 2, 0x0314, WRITE_MASK_VAL(7, 6, 0)), /* PCIE30X2 IO mux M0 */
+       RK_MUXROUTE_GRF(0, RK_PA6, 2, 0x0314, WRITE_MASK_VAL(7, 6, 0)), /* PCIE30X2 IO mux M0 */
        RK_MUXROUTE_GRF(2, RK_PD4, 4, 0x0314, WRITE_MASK_VAL(7, 6, 1)), /* PCIE30X2 IO mux M1 */
        RK_MUXROUTE_GRF(4, RK_PC2, 4, 0x0314, WRITE_MASK_VAL(7, 6, 2)), /* PCIE30X2 IO mux M2 */
 };
@@ -2436,10 +2436,19 @@ static int rockchip_get_pull(struct rockchip_pin_bank *bank, int pin_num)
        case RK3308:
        case RK3368:
        case RK3399:
+       case RK3568:
        case RK3588:
                pull_type = bank->pull_type[pin_num / 8];
                data >>= bit;
                data &= (1 << RK3188_PULL_BITS_PER_PIN) - 1;
+               /*
+                * In the TRM, pull-up being 1 for everything except the GPIO0_D3-D6,
+                * where that pull up value becomes 3.
+                */
+               if (ctrl->type == RK3568 && bank->bank_num == 0 && pin_num >= 27 && pin_num <= 30) {
+                       if (data == 3)
+                               data = 1;
+               }
 
                return rockchip_pull_list[pull_type][data];
        default:
@@ -2497,7 +2506,7 @@ static int rockchip_set_pull(struct rockchip_pin_bank *bank,
                        }
                }
                /*
-                * In the TRM, pull-up being 1 for everything except the GPIO0_D0-D6,
+                * In the TRM, pull-up being 1 for everything except the GPIO0_D3-D6,
                 * where that pull up value becomes 3.
                 */
                if (ctrl->type == RK3568 && bank->bank_num == 0 && pin_num >= 27 && pin_num <= 30) {
index 2b3335ab56c66867b21c4c41d385acc4b001fa9d..25101293268f6f579d569bf8ccd312c488620849 100644 (file)
@@ -499,7 +499,6 @@ static int sppctl_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
        return 0;
 }
 
-#ifdef CONFIG_DEBUG_FS
 static void sppctl_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
 {
        const char *label;
@@ -521,7 +520,6 @@ static void sppctl_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
                seq_puts(s, "\n");
        }
 }
-#endif
 
 static int sppctl_gpio_new(struct platform_device *pdev, struct sppctl_pdata *pctl)
 {
@@ -550,9 +548,8 @@ static int sppctl_gpio_new(struct platform_device *pdev, struct sppctl_pdata *pc
        gchip->get              = sppctl_gpio_get;
        gchip->set              = sppctl_gpio_set;
        gchip->set_config       = sppctl_gpio_set_config;
-#ifdef CONFIG_DEBUG_FS
-       gchip->dbg_show         = sppctl_gpio_dbg_show;
-#endif
+       gchip->dbg_show         = IS_ENABLED(CONFIG_DEBUG_FS) ?
+                                 sppctl_gpio_dbg_show : NULL;
        gchip->base             = -1;
        gchip->ngpio            = sppctl_gpio_list_sz;
        gchip->names            = sppctl_gpio_list_s;
index 43e7651991371b5ea292d0d379f8dcfd72dfeab4..c6537a1b3a2ec13ac2f32f7f82f87d7406cb53cb 100644 (file)
@@ -1700,8 +1700,10 @@ int ssam_request_sync(struct ssam_controller *ctrl,
                return status;
 
        status = ssam_request_sync_init(rqst, spec->flags);
-       if (status)
+       if (status) {
+               ssam_request_sync_free(rqst);
                return status;
+       }
 
        ssam_request_sync_set_resp(rqst, rsp);
 
index f5565570f16c79f927179a018464cc6bf6b70d7f..69132976d297e24948de68f3ccb0f8067e54f1e4 100644 (file)
@@ -916,6 +916,20 @@ static void ssh_rtl_rx_command(struct ssh_ptl *p, const struct ssam_span *data)
        if (sshp_parse_command(dev, data, &command, &command_data))
                return;
 
+       /*
+        * Check if the message was intended for us. If not, drop it.
+        *
+        * Note: We will need to change this to handle debug messages. On newer
+        * generation devices, these seem to be sent to tid_out=0x03. We as
+        * host can still receive them as they can be forwarded via an override
+        * option on SAM, but doing so does not change tid_out=0x00.
+        */
+       if (command->tid_out != 0x00) {
+               rtl_warn(rtl, "rtl: dropping message not intended for us (tid = %#04x)\n",
+                        command->tid_out);
+               return;
+       }
+
        if (ssh_rqid_is_event(get_unaligned_le16(&command->rqid)))
                ssh_rtl_rx_event(rtl, command, &command_data);
        else
index 439d282aafd192a9f051e553f2fe3b2caabfe0e0..8d924986381bef2974b842d73b0eae25a31f099a 100644 (file)
@@ -932,7 +932,7 @@ static int amd_pmc_probe(struct platform_device *pdev)
        if (enable_stb && (dev->cpu_id == AMD_CPU_ID_YC || dev->cpu_id == AMD_CPU_ID_CB)) {
                err = amd_pmc_s2d_init(dev);
                if (err)
-                       return err;
+                       goto err_pci_dev_put;
        }
 
        platform_set_drvdata(pdev, dev);
index c685a705b73dd0c032e3606a0bd6fba00b9a0c44..cb15acdf14a30a53a2cf5f53fbc618c3f1abbba2 100644 (file)
@@ -121,6 +121,10 @@ static struct quirk_entry quirk_asus_tablet_mode = {
        .tablet_switch_mode = asus_wmi_lid_flip_rog_devid,
 };
 
+static struct quirk_entry quirk_asus_ignore_fan = {
+       .wmi_ignore_fan = true,
+};
+
 static int dmi_matched(const struct dmi_system_id *dmi)
 {
        pr_info("Identified laptop model '%s'\n", dmi->ident);
@@ -473,6 +477,15 @@ static const struct dmi_system_id asus_quirks[] = {
                },
                .driver_data = &quirk_asus_tablet_mode,
        },
+       {
+               .callback = dmi_matched,
+               .ident = "ASUS VivoBook E410MA",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "E410MA"),
+               },
+               .driver_data = &quirk_asus_ignore_fan,
+       },
        {},
 };
 
@@ -511,6 +524,7 @@ static const struct key_entry asus_nb_wmi_keymap[] = {
        { KE_KEY, 0x30, { KEY_VOLUMEUP } },
        { KE_KEY, 0x31, { KEY_VOLUMEDOWN } },
        { KE_KEY, 0x32, { KEY_MUTE } },
+       { KE_KEY, 0x33, { KEY_SCREENLOCK } },
        { KE_KEY, 0x35, { KEY_SCREENLOCK } },
        { KE_KEY, 0x38, { KEY_PROG3 } }, /* Armoury Crate */
        { KE_KEY, 0x40, { KEY_PREVIOUSSONG } },
@@ -544,6 +558,7 @@ static const struct key_entry asus_nb_wmi_keymap[] = {
        { KE_KEY, 0x7D, { KEY_BLUETOOTH } }, /* Bluetooth Enable */
        { KE_KEY, 0x7E, { KEY_BLUETOOTH } }, /* Bluetooth Disable */
        { KE_KEY, 0x82, { KEY_CAMERA } },
+       { KE_KEY, 0x85, { KEY_CAMERA } },
        { KE_KEY, 0x86, { KEY_PROG1 } }, /* MyASUS Key */
        { KE_KEY, 0x88, { KEY_RFKILL  } }, /* Radio Toggle Key */
        { KE_KEY, 0x8A, { KEY_PROG1 } }, /* Color enhancement mode */
index 6f81b2844dcbea2789faea6e4dc821ad6847ed12..104188d70988cbae085b1050fb7d3832e5a5fdb8 100644 (file)
@@ -2243,7 +2243,9 @@ static int asus_wmi_fan_init(struct asus_wmi *asus)
        asus->fan_type = FAN_TYPE_NONE;
        asus->agfn_pwm = -1;
 
-       if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_CPU_FAN_CTRL))
+       if (asus->driver->quirks->wmi_ignore_fan)
+               asus->fan_type = FAN_TYPE_NONE;
+       else if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_CPU_FAN_CTRL))
                asus->fan_type = FAN_TYPE_SPEC83;
        else if (asus_wmi_has_agfn_fan(asus))
                asus->fan_type = FAN_TYPE_AGFN;
@@ -2436,6 +2438,9 @@ static int fan_curve_check_present(struct asus_wmi *asus, bool *available,
 
        *available = false;
 
+       if (asus->fan_type == FAN_TYPE_NONE)
+               return 0;
+
        err = fan_curve_get_factory_default(asus, fan_dev);
        if (err) {
                return 0;
index 65316998b898aaf7cbff2dbc3c1a969ac9e93de9..a478ebfd34dfadf28d0be0976df011f4fa854a7b 100644 (file)
@@ -38,6 +38,7 @@ struct quirk_entry {
        bool store_backlight_power;
        bool wmi_backlight_set_devstate;
        bool wmi_force_als_set;
+       bool wmi_ignore_fan;
        enum asus_wmi_tablet_switch_mode tablet_switch_mode;
        int wapf;
        /*
index c82b3d6867c5b28869bf33c911a7613e9efec657..c517bd45dd32e4154f239e01ccf1bd98c606ded2 100644 (file)
@@ -61,7 +61,7 @@ static const struct key_entry dell_wmi_keymap_type_0012[] = {
        /* privacy mic mute */
        { KE_KEY, 0x0001, { KEY_MICMUTE } },
        /* privacy camera mute */
-       { KE_SW,  0x0002, { SW_CAMERA_LENS_COVER } },
+       { KE_VSW, 0x0002, { SW_CAMERA_LENS_COVER } },
        { KE_END, 0},
 };
 
@@ -115,11 +115,15 @@ bool dell_privacy_process_event(int type, int code, int status)
 
        switch (code) {
        case DELL_PRIVACY_AUDIO_EVENT: /* Mic mute */
-       case DELL_PRIVACY_CAMERA_EVENT: /* Camera mute */
                priv->last_status = status;
                sparse_keymap_report_entry(priv->input_dev, key, 1, true);
                ret = true;
                break;
+       case DELL_PRIVACY_CAMERA_EVENT: /* Camera mute */
+               priv->last_status = status;
+               sparse_keymap_report_entry(priv->input_dev, key, !(status & CAMERA_STATUS), false);
+               ret = true;
+               break;
        default:
                dev_dbg(&priv->wdev->dev, "unknown event type 0x%04x 0x%04x\n", type, code);
        }
@@ -292,7 +296,7 @@ static int dell_privacy_wmi_probe(struct wmi_device *wdev, const void *context)
 {
        struct privacy_wmi_data *priv;
        struct key_entry *keymap;
-       int ret, i;
+       int ret, i, j;
 
        ret = wmi_has_guid(DELL_PRIVACY_GUID);
        if (!ret)
@@ -304,6 +308,11 @@ static int dell_privacy_wmi_probe(struct wmi_device *wdev, const void *context)
 
        dev_set_drvdata(&wdev->dev, priv);
        priv->wdev = wdev;
+
+       ret = get_current_status(priv->wdev);
+       if (ret)
+               return ret;
+
        /* create evdev passing interface */
        priv->input_dev = devm_input_allocate_device(&wdev->dev);
        if (!priv->input_dev)
@@ -318,9 +327,20 @@ static int dell_privacy_wmi_probe(struct wmi_device *wdev, const void *context)
        /* remap the keymap code with Dell privacy key type 0x12 as prefix
         * KEY_MICMUTE scancode will be reported as 0x120001
         */
-       for (i = 0; i < ARRAY_SIZE(dell_wmi_keymap_type_0012); i++) {
-               keymap[i] = dell_wmi_keymap_type_0012[i];
-               keymap[i].code |= (0x0012 << 16);
+       for (i = 0, j = 0; i < ARRAY_SIZE(dell_wmi_keymap_type_0012); i++) {
+               /*
+                * Unlike keys where only presses matter, userspace may act
+                * on switches in both of their positions. Only register
+                * SW_CAMERA_LENS_COVER if it is actually there.
+                */
+               if (dell_wmi_keymap_type_0012[i].type == KE_VSW &&
+                   dell_wmi_keymap_type_0012[i].sw.code == SW_CAMERA_LENS_COVER &&
+                   !(priv->features_present & BIT(DELL_PRIVACY_TYPE_CAMERA)))
+                       continue;
+
+               keymap[j] = dell_wmi_keymap_type_0012[i];
+               keymap[j].code |= (0x0012 << 16);
+               j++;
        }
        ret = sparse_keymap_setup(priv->input_dev, keymap, NULL);
        kfree(keymap);
@@ -331,11 +351,12 @@ static int dell_privacy_wmi_probe(struct wmi_device *wdev, const void *context)
        priv->input_dev->name = "Dell Privacy Driver";
        priv->input_dev->id.bustype = BUS_HOST;
 
-       ret = input_register_device(priv->input_dev);
-       if (ret)
-               return ret;
+       /* Report initial camera-cover status */
+       if (priv->features_present & BIT(DELL_PRIVACY_TYPE_CAMERA))
+               input_report_switch(priv->input_dev, SW_CAMERA_LENS_COVER,
+                                   !(priv->last_status & CAMERA_STATUS));
 
-       ret = get_current_status(priv->wdev);
+       ret = input_register_device(priv->input_dev);
        if (ret)
                return ret;
 
index 435d2d3d903b07af3c7fcca2a4d7e70af7b85610..0eb5bfdd823a1d58bd3d4e153048261ccd664221 100644 (file)
@@ -1621,6 +1621,12 @@ static const struct dmi_system_id set_fn_lock_led_list[] = {
                        DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Legion R7000P2020H"),
                }
        },
+       {
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+                       DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Legion 5 15ARH05"),
+               }
+       },
        {}
 };
 
index b2342b3d78c7234303bb6155005f00b82c797cea..74dc2cff799eec514f71ac996b06fee6c92bed2e 100644 (file)
@@ -181,6 +181,9 @@ int skl_int3472_register_regulator(struct int3472_discrete_device *int3472,
                return PTR_ERR(int3472->regulator.gpio);
        }
 
+       /* Ensure the pin is in output mode and non-active state */
+       gpiod_direction_output(int3472->regulator.gpio, 0);
+
        cfg.dev = &int3472->adev->dev;
        cfg.init_data = &init_data;
        cfg.ena_gpiod = int3472->regulator.gpio;
index 974a132db651647ee10cb8a73fcc03b0002d54fc..c42c3faa2c32da504f6c0757588b5ff77bb9bc32 100644 (file)
@@ -168,6 +168,8 @@ static int skl_int3472_map_gpio_to_clk(struct int3472_discrete_device *int3472,
                        return (PTR_ERR(gpio));
 
                int3472->clock.ena_gpio = gpio;
+               /* Ensure the pin is in output mode and non-active state */
+               gpiod_direction_output(int3472->clock.ena_gpio, 0);
                break;
        case INT3472_GPIO_TYPE_PRIVACY_LED:
                gpio = acpi_get_and_request_gpiod(path, pin, "int3472,privacy-led");
@@ -175,6 +177,8 @@ static int skl_int3472_map_gpio_to_clk(struct int3472_discrete_device *int3472,
                        return (PTR_ERR(gpio));
 
                int3472->clock.led_gpio = gpio;
+               /* Ensure the pin is in output mode and non-active state */
+               gpiod_direction_output(int3472->clock.led_gpio, 0);
                break;
        default:
                dev_err(int3472->dev, "Invalid GPIO type 0x%02x for clock\n", type);
index f1d802f6ec3f9172605b385e86dafbfbd2de09b8..3a15d32d7644c000e009c801261318cf9da794dd 100644 (file)
@@ -1029,6 +1029,7 @@ static const struct x86_cpu_id intel_pmc_core_ids[] = {
        X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE,          adl_core_init),
        X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_S,        adl_core_init),
        X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE,          mtl_core_init),
+       X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE_L,        mtl_core_init),
        {}
 };
 
index ca76076fc706a62def16d7bdd0b69baca7a3adfd..b3622419cd1a4860b31f053c4f27ec57ded772b5 100644 (file)
@@ -46,7 +46,8 @@ static struct {
        {SIMATIC_IPC_IPC427D, SIMATIC_IPC_DEVICE_427E, SIMATIC_IPC_DEVICE_NONE},
        {SIMATIC_IPC_IPC427E, SIMATIC_IPC_DEVICE_427E, SIMATIC_IPC_DEVICE_427E},
        {SIMATIC_IPC_IPC477E, SIMATIC_IPC_DEVICE_NONE, SIMATIC_IPC_DEVICE_427E},
-       {SIMATIC_IPC_IPC427G, SIMATIC_IPC_DEVICE_227G, SIMATIC_IPC_DEVICE_227G},
+       {SIMATIC_IPC_IPCBX_39A, SIMATIC_IPC_DEVICE_227G, SIMATIC_IPC_DEVICE_227G},
+       {SIMATIC_IPC_IPCPX_39A, SIMATIC_IPC_DEVICE_NONE, SIMATIC_IPC_DEVICE_227G},
 };
 
 static int register_platform_devices(u32 station_id)
index 7156ae2ad1962a750ef95c424d6068b170f86661..537d6a2d0781b741ce1839c501fbd5b1af1da1dc 100644 (file)
@@ -1887,14 +1887,21 @@ static int sony_nc_kbd_backlight_setup(struct platform_device *pd,
                break;
        }
 
-       ret = sony_call_snc_handle(handle, probe_base, &result);
-       if (ret)
-               return ret;
+       /*
+        * Only probe if there is a separate probe_base, otherwise the probe call
+        * is equivalent to __sony_nc_kbd_backlight_mode_set(0), resulting in
+        * the keyboard backlight being turned off.
+        */
+       if (probe_base) {
+               ret = sony_call_snc_handle(handle, probe_base, &result);
+               if (ret)
+                       return ret;
 
-       if ((handle == 0x0137 && !(result & 0x02)) ||
-                       !(result & 0x01)) {
-               dprintk("no backlight keyboard found\n");
-               return 0;
+               if ((handle == 0x0137 && !(result & 0x02)) ||
+                               !(result & 0x01)) {
+                       dprintk("no backlight keyboard found\n");
+                       return 0;
+               }
        }
 
        kbdbl_ctl = kzalloc(sizeof(*kbdbl_ctl), GFP_KERNEL);
index 1195293b22fddf6065d7f7428b3e98547b86ef3b..a95946800ae9d3f947acbf752f69afdcd3114210 100644 (file)
@@ -10311,9 +10311,11 @@ static DEFINE_MUTEX(dytc_mutex);
 static int dytc_capabilities;
 static bool dytc_mmc_get_available;
 
-static int convert_dytc_to_profile(int dytcmode, enum platform_profile_option *profile)
+static int convert_dytc_to_profile(int funcmode, int dytcmode,
+               enum platform_profile_option *profile)
 {
-       if (dytc_capabilities & BIT(DYTC_FC_MMC)) {
+       switch (funcmode) {
+       case DYTC_FUNCTION_MMC:
                switch (dytcmode) {
                case DYTC_MODE_MMC_LOWPOWER:
                        *profile = PLATFORM_PROFILE_LOW_POWER;
@@ -10329,8 +10331,7 @@ static int convert_dytc_to_profile(int dytcmode, enum platform_profile_option *p
                        return -EINVAL;
                }
                return 0;
-       }
-       if (dytc_capabilities & BIT(DYTC_FC_PSC)) {
+       case DYTC_FUNCTION_PSC:
                switch (dytcmode) {
                case DYTC_MODE_PSC_LOWPOWER:
                        *profile = PLATFORM_PROFILE_LOW_POWER;
@@ -10344,6 +10345,14 @@ static int convert_dytc_to_profile(int dytcmode, enum platform_profile_option *p
                default: /* Unknown mode */
                        return -EINVAL;
                }
+               return 0;
+       case DYTC_FUNCTION_AMT:
+               /* For now return balanced. It's the closest we have to 'auto' */
+               *profile =  PLATFORM_PROFILE_BALANCED;
+               return 0;
+       default:
+               /* Unknown function */
+               return -EOPNOTSUPP;
        }
        return 0;
 }
@@ -10492,6 +10501,7 @@ static int dytc_profile_set(struct platform_profile_handler *pprof,
                err = dytc_command(DYTC_SET_COMMAND(DYTC_FUNCTION_PSC, perfmode, 1), &output);
                if (err)
                        goto unlock;
+
                /* system supports AMT, activate it when on balanced */
                if (dytc_capabilities & BIT(DYTC_FC_AMT))
                        dytc_control_amt(profile == PLATFORM_PROFILE_BALANCED);
@@ -10507,7 +10517,7 @@ static void dytc_profile_refresh(void)
 {
        enum platform_profile_option profile;
        int output, err = 0;
-       int perfmode;
+       int perfmode, funcmode;
 
        mutex_lock(&dytc_mutex);
        if (dytc_capabilities & BIT(DYTC_FC_MMC)) {
@@ -10522,8 +10532,9 @@ static void dytc_profile_refresh(void)
        if (err)
                return;
 
+       funcmode = (output >> DYTC_GET_FUNCTION_BIT) & 0xF;
        perfmode = (output >> DYTC_GET_MODE_BIT) & 0xF;
-       convert_dytc_to_profile(perfmode, &profile);
+       convert_dytc_to_profile(funcmode, perfmode, &profile);
        if (profile != dytc_current_profile) {
                dytc_current_profile = profile;
                platform_profile_notify();
index baae3120efd0525f19ced2c2e21b2520ddb32652..f00995390fdfe0ef91b60f5ad5ad7bc2c1d08580 100644 (file)
@@ -264,6 +264,23 @@ static const struct ts_dmi_data connect_tablet9_data = {
        .properties     = connect_tablet9_props,
 };
 
+static const struct property_entry csl_panther_tab_hd_props[] = {
+       PROPERTY_ENTRY_U32("touchscreen-min-x", 1),
+       PROPERTY_ENTRY_U32("touchscreen-min-y", 20),
+       PROPERTY_ENTRY_U32("touchscreen-size-x", 1980),
+       PROPERTY_ENTRY_U32("touchscreen-size-y", 1526),
+       PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
+       PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
+       PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-csl-panther-tab-hd.fw"),
+       PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+       { }
+};
+
+static const struct ts_dmi_data csl_panther_tab_hd_data = {
+       .acpi_name      = "MSSL1680:00",
+       .properties     = csl_panther_tab_hd_props,
+};
+
 static const struct property_entry cube_iwork8_air_props[] = {
        PROPERTY_ENTRY_U32("touchscreen-min-x", 1),
        PROPERTY_ENTRY_U32("touchscreen-min-y", 3),
@@ -1124,6 +1141,14 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "Tablet 9"),
                },
        },
+       {
+               /* CSL Panther Tab HD */
+               .driver_data = (void *)&csl_panther_tab_hd_data,
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "CSL Computer GmbH & Co. KG"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "CSL Panther Tab HD"),
+               },
+       },
        {
                /* CUBE iwork8 Air */
                .driver_data = (void *)&cube_iwork8_air_data,
index e01b32d1fa17d428ded0a718a3dea03a1096b3f9..00828f5baa972a3d8bdf5f37583458fddd447009 100644 (file)
@@ -498,6 +498,12 @@ static int da9211_i2c_probe(struct i2c_client *i2c)
 
        chip->chip_irq = i2c->irq;
 
+       ret = da9211_regulator_init(chip);
+       if (ret < 0) {
+               dev_err(chip->dev, "Failed to initialize regulator: %d\n", ret);
+               return ret;
+       }
+
        if (chip->chip_irq != 0) {
                ret = devm_request_threaded_irq(chip->dev, chip->chip_irq, NULL,
                                        da9211_irq_handler,
@@ -512,11 +518,6 @@ static int da9211_i2c_probe(struct i2c_client *i2c)
                dev_warn(chip->dev, "No IRQ configured\n");
        }
 
-       ret = da9211_regulator_init(chip);
-
-       if (ret < 0)
-               dev_err(chip->dev, "Failed to initialize regulator: %d\n", ret);
-
        return ret;
 }
 
index 43b5b93777149b2f079593e787438d96f46dca56..ae6021390143c60131579f2413858da665146ad5 100644 (file)
@@ -1016,7 +1016,7 @@ static const struct rpmh_vreg_init_data pm8550_vreg_data[] = {
        RPMH_VREG("ldo8",   "ldo%s8",  &pmic5_pldo_lv, "vdd-l8-l9"),
        RPMH_VREG("ldo9",   "ldo%s9",  &pmic5_pldo,    "vdd-l8-l9"),
        RPMH_VREG("ldo10",  "ldo%s10", &pmic5_nldo,    "vdd-l1-l4-l10"),
-       RPMH_VREG("ldo11",  "ldo%s11", &pmic5_pldo,    "vdd-l11"),
+       RPMH_VREG("ldo11",  "ldo%s11", &pmic5_nldo,    "vdd-l11"),
        RPMH_VREG("ldo12",  "ldo%s12", &pmic5_pldo,    "vdd-l12"),
        RPMH_VREG("ldo13",  "ldo%s13", &pmic5_pldo,    "vdd-l2-l13-l14"),
        RPMH_VREG("ldo14",  "ldo%s14", &pmic5_pldo,    "vdd-l2-l13-l14"),
index de176c2fbad96d3165fba9172ce71a0531ba8d3b..2a52c990d4fecbe7d6044388bfc430e24b66fb3f 100644 (file)
@@ -257,7 +257,7 @@ config RESET_SUNXI
 
 config RESET_TI_SCI
        tristate "TI System Control Interface (TI-SCI) reset driver"
-       depends on TI_SCI_PROTOCOL || COMPILE_TEST
+       depends on TI_SCI_PROTOCOL || (COMPILE_TEST && TI_SCI_PROTOCOL=n)
        help
          This enables the reset driver support over TI System Control Interface
          available on some new TI's SoCs. If you wish to use reset resources
index 146fd5d45e99d3e68695965888f3949ceb7bc0b7..15abac9fc72c074adaeec0a8f98ce73d1355d26a 100644 (file)
@@ -47,7 +47,6 @@ static int uniphier_glue_reset_probe(struct platform_device *pdev)
        struct device *dev = &pdev->dev;
        struct uniphier_glue_reset_priv *priv;
        struct resource *res;
-       resource_size_t size;
        int i, ret;
 
        priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
@@ -60,7 +59,6 @@ static int uniphier_glue_reset_probe(struct platform_device *pdev)
                return -EINVAL;
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       size = resource_size(res);
        priv->rdata.membase = devm_ioremap_resource(dev, res);
        if (IS_ERR(priv->rdata.membase))
                return PTR_ERR(priv->rdata.membase);
@@ -96,7 +94,7 @@ static int uniphier_glue_reset_probe(struct platform_device *pdev)
 
        spin_lock_init(&priv->rdata.lock);
        priv->rdata.rcdev.owner = THIS_MODULE;
-       priv->rdata.rcdev.nr_resets = size * BITS_PER_BYTE;
+       priv->rdata.rcdev.nr_resets = resource_size(res) * BITS_PER_BYTE;
        priv->rdata.rcdev.ops = &reset_simple_ops;
        priv->rdata.rcdev.of_node = dev->of_node;
        priv->rdata.active_low = true;
index b392b9f5482e0e1f3c3e3c67118876981b7cfe7a..c0f85ffb2b62de7ae80c32b456fc49673e57e50e 100644 (file)
@@ -865,6 +865,8 @@ dcssblk_submit_bio(struct bio *bio)
        unsigned long bytes_done;
 
        bio = bio_split_to_limits(bio);
+       if (!bio)
+               return;
 
        bytes_done = 0;
        dev_info = bio->bi_bdev->bd_disk->private_data;
index 406be169173ce914db9dd593f6d70e5b52427961..d1adc4b831936525e2aef6ee4bc0df0f9ab66de3 100644 (file)
@@ -410,13 +410,13 @@ static ssize_t qeth_dev_isolation_show(struct device *dev,
 
        switch (card->options.isolation) {
        case ISOLATION_MODE_NONE:
-               return snprintf(buf, 6, "%s\n", ATTR_QETH_ISOLATION_NONE);
+               return sysfs_emit(buf, "%s\n", ATTR_QETH_ISOLATION_NONE);
        case ISOLATION_MODE_FWD:
-               return snprintf(buf, 9, "%s\n", ATTR_QETH_ISOLATION_FWD);
+               return sysfs_emit(buf, "%s\n", ATTR_QETH_ISOLATION_FWD);
        case ISOLATION_MODE_DROP:
-               return snprintf(buf, 6, "%s\n", ATTR_QETH_ISOLATION_DROP);
+               return sysfs_emit(buf, "%s\n", ATTR_QETH_ISOLATION_DROP);
        default:
-               return snprintf(buf, 5, "%s\n", "N/A");
+               return sysfs_emit(buf, "%s\n", "N/A");
        }
 }
 
@@ -500,9 +500,9 @@ static ssize_t qeth_hw_trap_show(struct device *dev,
        struct qeth_card *card = dev_get_drvdata(dev);
 
        if (card->info.hwtrap)
-               return snprintf(buf, 5, "arm\n");
+               return sysfs_emit(buf, "arm\n");
        else
-               return snprintf(buf, 8, "disarm\n");
+               return sysfs_emit(buf, "disarm\n");
 }
 
 static ssize_t qeth_hw_trap_store(struct device *dev,
index 41ba22f6c7f051cbf58abcebf322cda7527a0c70..8c038ccf1c0954f2c239ecf7e31754abf08e529a 100644 (file)
@@ -162,7 +162,7 @@ static void hisi_sas_slot_index_clear(struct hisi_hba *hisi_hba, int slot_idx)
 static void hisi_sas_slot_index_free(struct hisi_hba *hisi_hba, int slot_idx)
 {
        if (hisi_hba->hw->slot_index_alloc ||
-           slot_idx >= HISI_SAS_UNRESERVED_IPTT) {
+           slot_idx < HISI_SAS_RESERVED_IPTT) {
                spin_lock(&hisi_hba->lock);
                hisi_sas_slot_index_clear(hisi_hba, slot_idx);
                spin_unlock(&hisi_hba->lock);
@@ -704,7 +704,7 @@ static int hisi_sas_init_device(struct domain_device *device)
                int_to_scsilun(0, &lun);
 
                while (retry-- > 0) {
-                       rc = sas_clear_task_set(device, lun.scsi_lun);
+                       rc = sas_abort_task_set(device, lun.scsi_lun);
                        if (rc == TMF_RESP_FUNC_COMPLETE) {
                                hisi_sas_release_task(hisi_hba, device);
                                break;
@@ -1316,7 +1316,7 @@ static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba)
                                device->linkrate = phy->sas_phy.linkrate;
 
                        hisi_hba->hw->setup_itct(hisi_hba, sas_dev);
-               } else
+               } else if (!port->port_attached)
                        port->id = 0xff;
        }
 }
index 1ccce706167a5b054c987f38575f65ef08494f70..5e80225b5308228b753c7e0bb1ac4a598ea89c9d 100644 (file)
@@ -889,7 +889,9 @@ void sas_ata_device_link_abort(struct domain_device *device, bool force_reset)
 {
        struct ata_port *ap = device->sata_dev.ap;
        struct ata_link *link = &ap->link;
+       unsigned long flags;
 
+       spin_lock_irqsave(ap->lock, flags);
        device->sata_dev.fis[2] = ATA_ERR | ATA_DRDY; /* tf status */
        device->sata_dev.fis[3] = ATA_ABORTED; /* tf error */
 
@@ -897,6 +899,7 @@ void sas_ata_device_link_abort(struct domain_device *device, bool force_reset)
        if (force_reset)
                link->eh_info.action |= ATA_EH_RESET;
        ata_link_abort(link);
+       spin_unlock_irqrestore(ap->lock, flags);
 }
 EXPORT_SYMBOL_GPL(sas_ata_device_link_abort);
 
index ef86ca46646b8f93a72adee0aff2c5343d220c3b..3bf8cf34e1c3f943d00043fcee13143e46af5f3c 100644 (file)
@@ -1,5 +1,5 @@
 # mpi3mr makefile
-obj-m += mpi3mr.o
+obj-$(CONFIG_SCSI_MPI3MR) += mpi3mr.o
 mpi3mr-y +=  mpi3mr_os.o     \
                mpi3mr_fw.o \
                mpi3mr_app.o \
index 0c4aabaefdcc4c34eba84223093668444c4f6cf0..286a44506578b40665e1a45e0e0c88da9267b3b3 100644 (file)
@@ -3633,8 +3633,7 @@ int mpi3mr_setup_resources(struct mpi3mr_ioc *mrioc)
        int i, retval = 0, capb = 0;
        u16 message_control;
        u64 dma_mask = mrioc->dma_mask ? mrioc->dma_mask :
-           (((dma_get_required_mask(&pdev->dev) > DMA_BIT_MASK(32)) &&
-           (sizeof(dma_addr_t) > 4)) ? DMA_BIT_MASK(64) : DMA_BIT_MASK(32));
+           ((sizeof(dma_addr_t) > 4) ? DMA_BIT_MASK(64) : DMA_BIT_MASK(32));
 
        if (pci_enable_device_mem(pdev)) {
                ioc_err(mrioc, "pci_enable_device_mem: failed\n");
index 4e981ccaac4163ce9dcca43808b73d3594731162..69061545d9d2f1ec234cf059b1e3c4bd2ed1f40b 100644 (file)
@@ -2992,8 +2992,7 @@ _base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
        struct sysinfo s;
        u64 coherent_dma_mask, dma_mask;
 
-       if (ioc->is_mcpu_endpoint || sizeof(dma_addr_t) == 4 ||
-           dma_get_required_mask(&pdev->dev) <= DMA_BIT_MASK(32)) {
+       if (ioc->is_mcpu_endpoint || sizeof(dma_addr_t) == 4) {
                ioc->dma_mask = 32;
                coherent_dma_mask = dma_mask = DMA_BIT_MASK(32);
        /* Set 63 bit DMA mask for all SAS3 and SAS35 controllers */
index cc6953809a248a2b8def1e71b567fe7f643e2244..8553277effb3d8ea2a3bd67d2ae6cc5710a4dd60 100644 (file)
@@ -1511,8 +1511,6 @@ static int inquiry_vpd_b0(unsigned char *arr)
        put_unaligned_be64(sdebug_write_same_length, &arr[32]);
 
        return 0x3c; /* Mandatory page length for Logical Block Provisioning */
-
-       return sizeof(vpdb0_data);
 }
 
 /* Block device characteristics VPD page (SBC-3) */
index a7960ad2d386a466719e5548a70e7f34b18db5f3..2aa2c2aee6e7f99cc3275244d5f70d72dc02a09e 100644 (file)
@@ -231,6 +231,11 @@ scsi_abort_command(struct scsi_cmnd *scmd)
        struct Scsi_Host *shost = sdev->host;
        unsigned long flags;
 
+       if (!shost->hostt->eh_abort_handler) {
+               /* No abort handler, fail command directly */
+               return FAILED;
+       }
+
        if (scmd->eh_eflags & SCSI_EH_ABORT_SCHEDULED) {
                /*
                 * Retry after abort failed, escalate to next level.
index 13cfd3e317cc0690c212cb910e8b1bd6efe23854..b9b97300e3b3ca6b566813e1d6664da56bfdbc93 100644 (file)
@@ -1677,6 +1677,13 @@ static const char *iscsi_session_state_name(int state)
        return name;
 }
 
+static char *iscsi_session_target_state_name[] = {
+       [ISCSI_SESSION_TARGET_UNBOUND]   = "UNBOUND",
+       [ISCSI_SESSION_TARGET_ALLOCATED] = "ALLOCATED",
+       [ISCSI_SESSION_TARGET_SCANNED]   = "SCANNED",
+       [ISCSI_SESSION_TARGET_UNBINDING] = "UNBINDING",
+};
+
 int iscsi_session_chkready(struct iscsi_cls_session *session)
 {
        int err;
@@ -1786,9 +1793,13 @@ static int iscsi_user_scan_session(struct device *dev, void *data)
                if ((scan_data->channel == SCAN_WILD_CARD ||
                     scan_data->channel == 0) &&
                    (scan_data->id == SCAN_WILD_CARD ||
-                    scan_data->id == id))
+                    scan_data->id == id)) {
                        scsi_scan_target(&session->dev, 0, id,
                                         scan_data->lun, scan_data->rescan);
+                       spin_lock_irqsave(&session->lock, flags);
+                       session->target_state = ISCSI_SESSION_TARGET_SCANNED;
+                       spin_unlock_irqrestore(&session->lock, flags);
+               }
        }
 
 user_scan_exit:
@@ -1961,31 +1972,41 @@ static void __iscsi_unbind_session(struct work_struct *work)
        struct iscsi_cls_host *ihost = shost->shost_data;
        unsigned long flags;
        unsigned int target_id;
+       bool remove_target = true;
 
        ISCSI_DBG_TRANS_SESSION(session, "Unbinding session\n");
 
        /* Prevent new scans and make sure scanning is not in progress */
        mutex_lock(&ihost->mutex);
        spin_lock_irqsave(&session->lock, flags);
-       if (session->target_id == ISCSI_MAX_TARGET) {
+       if (session->target_state == ISCSI_SESSION_TARGET_ALLOCATED) {
+               remove_target = false;
+       } else if (session->target_state != ISCSI_SESSION_TARGET_SCANNED) {
                spin_unlock_irqrestore(&session->lock, flags);
                mutex_unlock(&ihost->mutex);
-               goto unbind_session_exit;
+               ISCSI_DBG_TRANS_SESSION(session,
+                       "Skipping target unbinding: Session is unbound/unbinding.\n");
+               return;
        }
 
+       session->target_state = ISCSI_SESSION_TARGET_UNBINDING;
        target_id = session->target_id;
        session->target_id = ISCSI_MAX_TARGET;
        spin_unlock_irqrestore(&session->lock, flags);
        mutex_unlock(&ihost->mutex);
 
-       scsi_remove_target(&session->dev);
+       if (remove_target)
+               scsi_remove_target(&session->dev);
 
        if (session->ida_used)
                ida_free(&iscsi_sess_ida, target_id);
 
-unbind_session_exit:
        iscsi_session_event(session, ISCSI_KEVENT_UNBIND_SESSION);
        ISCSI_DBG_TRANS_SESSION(session, "Completed target removal\n");
+
+       spin_lock_irqsave(&session->lock, flags);
+       session->target_state = ISCSI_SESSION_TARGET_UNBOUND;
+       spin_unlock_irqrestore(&session->lock, flags);
 }
 
 static void __iscsi_destroy_session(struct work_struct *work)
@@ -2062,6 +2083,9 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
                session->ida_used = true;
        } else
                session->target_id = target_id;
+       spin_lock_irqsave(&session->lock, flags);
+       session->target_state = ISCSI_SESSION_TARGET_ALLOCATED;
+       spin_unlock_irqrestore(&session->lock, flags);
 
        dev_set_name(&session->dev, "session%u", session->sid);
        err = device_add(&session->dev);
@@ -4369,6 +4393,19 @@ iscsi_session_attr(def_taskmgmt_tmo, ISCSI_PARAM_DEF_TASKMGMT_TMO, 0);
 iscsi_session_attr(discovery_parent_idx, ISCSI_PARAM_DISCOVERY_PARENT_IDX, 0);
 iscsi_session_attr(discovery_parent_type, ISCSI_PARAM_DISCOVERY_PARENT_TYPE, 0);
 
+static ssize_t
+show_priv_session_target_state(struct device *dev, struct device_attribute *attr,
+                       char *buf)
+{
+       struct iscsi_cls_session *session = iscsi_dev_to_session(dev->parent);
+
+       return sysfs_emit(buf, "%s\n",
+                       iscsi_session_target_state_name[session->target_state]);
+}
+
+static ISCSI_CLASS_ATTR(priv_sess, target_state, S_IRUGO,
+                       show_priv_session_target_state, NULL);
+
 static ssize_t
 show_priv_session_state(struct device *dev, struct device_attribute *attr,
                        char *buf)
@@ -4471,6 +4508,7 @@ static struct attribute *iscsi_session_attrs[] = {
        &dev_attr_sess_boot_target.attr,
        &dev_attr_priv_sess_recovery_tmo.attr,
        &dev_attr_priv_sess_state.attr,
+       &dev_attr_priv_sess_target_state.attr,
        &dev_attr_priv_sess_creator.attr,
        &dev_attr_sess_chap_out_idx.attr,
        &dev_attr_sess_chap_in_idx.attr,
@@ -4584,6 +4622,8 @@ static umode_t iscsi_session_attr_is_visible(struct kobject *kobj,
                return S_IRUGO | S_IWUSR;
        else if (attr == &dev_attr_priv_sess_state.attr)
                return S_IRUGO;
+       else if (attr == &dev_attr_priv_sess_target_state.attr)
+               return S_IRUGO;
        else if (attr == &dev_attr_priv_sess_creator.attr)
                return S_IRUGO;
        else if (attr == &dev_attr_priv_sess_target_id.attr)
index d7a84c0bfaeb73570523b95f6bb4ea82a4d06b66..22705eb781b0e3138fc681c1836d9e099cc33271 100644 (file)
@@ -1823,6 +1823,9 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
        ret = storvsc_do_io(dev, cmd_request, get_cpu());
        put_cpu();
 
+       if (ret)
+               scsi_dma_unmap(scmnd);
+
        if (ret == -EAGAIN) {
                /* no more space */
                ret = SCSI_MLQUEUE_DEVICE_BUSY;
index 66b316d173b0b4155b26c2d64d5d5213e3c6fdec..71a3bb83984c0097ce440977b9b0d792dd3b3009 100644 (file)
@@ -995,7 +995,7 @@ static int scsifront_suspend(struct xenbus_device *dev)
        return err;
 }
 
-static int scsifront_remove(struct xenbus_device *dev)
+static void scsifront_remove(struct xenbus_device *dev)
 {
        struct vscsifrnt_info *info = dev_get_drvdata(&dev->dev);
 
@@ -1011,8 +1011,6 @@ static int scsifront_remove(struct xenbus_device *dev)
 
        scsifront_free_ring(info);
        scsi_host_put(info->host);
-
-       return 0;
 }
 
 static void scsifront_disconnect(struct vscsifrnt_info *info)
index cd44f17dad3d04cd3ec1f6b8d8614e3fb9828456..d51abb462ae5d3481d26a0bdc6ababe603d7ca26 100644 (file)
@@ -461,9 +461,10 @@ static int apr_add_device(struct device *dev, struct device_node *np,
                goto out;
        }
 
+       /* Protection domain is optional, it does not exist on older platforms */
        ret = of_property_read_string_index(np, "qcom,protection-domain",
                                            1, &adev->service_path);
-       if (ret < 0) {
+       if (ret < 0 && ret != -EINVAL) {
                dev_err(dev, "Failed to read second value of qcom,protection-domain\n");
                goto out;
        }
index e9b854ed1bdfd645caaa235fc5d18f0fb8362488..144ea68e0920a31da9476293a2075ab30ac75e36 100644 (file)
@@ -1708,12 +1708,16 @@ static int cpr_probe(struct platform_device *pdev)
 
        ret = of_genpd_add_provider_simple(dev->of_node, &drv->pd);
        if (ret)
-               return ret;
+               goto err_remove_genpd;
 
        platform_set_drvdata(pdev, drv);
        cpr_debugfs_init(drv);
 
        return 0;
+
+err_remove_genpd:
+       pm_genpd_remove(&drv->pd);
+       return ret;
 }
 
 static int cpr_remove(struct platform_device *pdev)
index 520b4cc69cdc956cfe6e3032196d547756f60d1b..91db3c973167b0e319274cad3024c85cb50840bb 100644 (file)
 #define CDNS_XSPI_CMD_FLD_DSEQ_CMD_3(op) ( \
        FIELD_PREP(CDNS_XSPI_CMD_DSEQ_R3_DCNT_H, \
                ((op)->data.nbytes >> 16) & 0xffff) | \
-       FIELD_PREP(CDNS_XSPI_CMD_DSEQ_R3_NUM_OF_DUMMY, (op)->dummy.nbytes * 8))
+       FIELD_PREP(CDNS_XSPI_CMD_DSEQ_R3_NUM_OF_DUMMY, \
+                 (op)->dummy.buswidth != 0 ? \
+                 (((op)->dummy.nbytes * 8) / (op)->dummy.buswidth) : \
+                 0))
 
 #define CDNS_XSPI_CMD_FLD_DSEQ_CMD_4(op, chipsel) ( \
        FIELD_PREP(CDNS_XSPI_CMD_DSEQ_R4_BANK, chipsel) | \
index 6de8360e5c2a90a24d7035891745709f2b6de489..9eab6c20dbc561ca499ef5c7becc3982164121cc 100644 (file)
@@ -1253,6 +1253,11 @@ static int mtk_spi_probe(struct platform_device *pdev)
                dev_notice(dev, "SPI dma_set_mask(%d) failed, ret:%d\n",
                           addr_bits, ret);
 
+       ret = devm_request_irq(dev, irq, mtk_spi_interrupt,
+                              IRQF_TRIGGER_NONE, dev_name(dev), master);
+       if (ret)
+               return dev_err_probe(dev, ret, "failed to register irq\n");
+
        pm_runtime_enable(dev);
 
        ret = devm_spi_register_master(dev, master);
@@ -1261,13 +1266,6 @@ static int mtk_spi_probe(struct platform_device *pdev)
                return dev_err_probe(dev, ret, "failed to register master\n");
        }
 
-       ret = devm_request_irq(dev, irq, mtk_spi_interrupt,
-                              IRQF_TRIGGER_NONE, dev_name(dev), master);
-       if (ret) {
-               pm_runtime_disable(dev);
-               return dev_err_probe(dev, ret, "failed to register irq\n");
-       }
-
        return 0;
 }
 
index 3cc7bb4d03decd72e3accf1201d33daa2a1a00c4..15f174f4e0561a686674162d6011c0c119cc278f 100644 (file)
@@ -2310,7 +2310,7 @@ static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
        if (!of_property_read_u32(nc, "spi-max-frequency", &value))
                spi->max_speed_hz = value;
 
-       if (!of_property_read_u16(nc, "spi-cs-setup-ns", &cs_setup)) {
+       if (!of_property_read_u16(nc, "spi-cs-setup-delay-ns", &cs_setup)) {
                spi->cs_setup.value = cs_setup;
                spi->cs_setup.unit = SPI_DELAY_UNIT_NSECS;
        }
index 6313e7d0cdf8715cc2746bc4681fbcbaf70de2e3..1935ca61344706a60f9c497d8d2c4052414fe12d 100644 (file)
@@ -68,7 +68,7 @@ static_assert(N_SPI_MINORS > 0 && N_SPI_MINORS <= 256);
 
 struct spidev_data {
        dev_t                   devt;
-       spinlock_t              spi_lock;
+       struct mutex            spi_lock;
        struct spi_device       *spi;
        struct list_head        device_entry;
 
@@ -95,9 +95,8 @@ spidev_sync(struct spidev_data *spidev, struct spi_message *message)
        int status;
        struct spi_device *spi;
 
-       spin_lock_irq(&spidev->spi_lock);
+       mutex_lock(&spidev->spi_lock);
        spi = spidev->spi;
-       spin_unlock_irq(&spidev->spi_lock);
 
        if (spi == NULL)
                status = -ESHUTDOWN;
@@ -107,6 +106,7 @@ spidev_sync(struct spidev_data *spidev, struct spi_message *message)
        if (status == 0)
                status = message->actual_length;
 
+       mutex_unlock(&spidev->spi_lock);
        return status;
 }
 
@@ -359,12 +359,12 @@ spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
         * we issue this ioctl.
         */
        spidev = filp->private_data;
-       spin_lock_irq(&spidev->spi_lock);
+       mutex_lock(&spidev->spi_lock);
        spi = spi_dev_get(spidev->spi);
-       spin_unlock_irq(&spidev->spi_lock);
-
-       if (spi == NULL)
+       if (spi == NULL) {
+               mutex_unlock(&spidev->spi_lock);
                return -ESHUTDOWN;
+       }
 
        /* use the buffer lock here for triple duty:
         *  - prevent I/O (from us) so calling spi_setup() is safe;
@@ -508,6 +508,7 @@ spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 
        mutex_unlock(&spidev->buf_lock);
        spi_dev_put(spi);
+       mutex_unlock(&spidev->spi_lock);
        return retval;
 }
 
@@ -529,12 +530,12 @@ spidev_compat_ioc_message(struct file *filp, unsigned int cmd,
         * we issue this ioctl.
         */
        spidev = filp->private_data;
-       spin_lock_irq(&spidev->spi_lock);
+       mutex_lock(&spidev->spi_lock);
        spi = spi_dev_get(spidev->spi);
-       spin_unlock_irq(&spidev->spi_lock);
-
-       if (spi == NULL)
+       if (spi == NULL) {
+               mutex_unlock(&spidev->spi_lock);
                return -ESHUTDOWN;
+       }
 
        /* SPI_IOC_MESSAGE needs the buffer locked "normally" */
        mutex_lock(&spidev->buf_lock);
@@ -561,6 +562,7 @@ spidev_compat_ioc_message(struct file *filp, unsigned int cmd,
 done:
        mutex_unlock(&spidev->buf_lock);
        spi_dev_put(spi);
+       mutex_unlock(&spidev->spi_lock);
        return retval;
 }
 
@@ -601,7 +603,6 @@ static int spidev_open(struct inode *inode, struct file *filp)
        if (!spidev->tx_buffer) {
                spidev->tx_buffer = kmalloc(bufsiz, GFP_KERNEL);
                if (!spidev->tx_buffer) {
-                       dev_dbg(&spidev->spi->dev, "open/ENOMEM\n");
                        status = -ENOMEM;
                        goto err_find_dev;
                }
@@ -610,7 +611,6 @@ static int spidev_open(struct inode *inode, struct file *filp)
        if (!spidev->rx_buffer) {
                spidev->rx_buffer = kmalloc(bufsiz, GFP_KERNEL);
                if (!spidev->rx_buffer) {
-                       dev_dbg(&spidev->spi->dev, "open/ENOMEM\n");
                        status = -ENOMEM;
                        goto err_alloc_rx_buf;
                }
@@ -640,10 +640,10 @@ static int spidev_release(struct inode *inode, struct file *filp)
        spidev = filp->private_data;
        filp->private_data = NULL;
 
-       spin_lock_irq(&spidev->spi_lock);
+       mutex_lock(&spidev->spi_lock);
        /* ... after we unbound from the underlying device? */
        dofree = (spidev->spi == NULL);
-       spin_unlock_irq(&spidev->spi_lock);
+       mutex_unlock(&spidev->spi_lock);
 
        /* last close? */
        spidev->users--;
@@ -776,7 +776,7 @@ static int spidev_probe(struct spi_device *spi)
 
        /* Initialize the driver data */
        spidev->spi = spi;
-       spin_lock_init(&spidev->spi_lock);
+       mutex_init(&spidev->spi_lock);
        mutex_init(&spidev->buf_lock);
 
        INIT_LIST_HEAD(&spidev->device_entry);
@@ -821,9 +821,9 @@ static void spidev_remove(struct spi_device *spi)
        /* prevent new opens */
        mutex_lock(&device_list_lock);
        /* make sure ops on existing fds can abort cleanly */
-       spin_lock_irq(&spidev->spi_lock);
+       mutex_lock(&spidev->spi_lock);
        spidev->spi = NULL;
-       spin_unlock_irq(&spidev->spi_lock);
+       mutex_unlock(&spidev->spi_lock);
 
        list_del(&spidev->device_entry);
        device_destroy(spidev_class, spidev->devt);
index db1441c0cc662092fd238cdf8116b6c6def3b880..690ab7165b2c18cddcfcb194cef1c6b3f3571b69 100644 (file)
@@ -86,7 +86,7 @@ struct vchiq_service_params_kernel {
 
 struct vchiq_instance;
 
-extern enum vchiq_status vchiq_initialise(struct vchiq_instance **pinstance);
+extern int vchiq_initialise(struct vchiq_instance **pinstance);
 extern enum vchiq_status vchiq_shutdown(struct vchiq_instance *instance);
 extern enum vchiq_status vchiq_connect(struct vchiq_instance *instance);
 extern enum vchiq_status vchiq_open_service(struct vchiq_instance *instance,
index 2851ef6b9cd0f628ea17de94861bfebfe1b22c35..cd20eb18f27513cbe19efd025cfed2ae334e3fe7 100644 (file)
@@ -100,10 +100,10 @@ vchiq_dump_platform_use_state(struct vchiq_state *state);
 extern void
 vchiq_dump_service_use_state(struct vchiq_state *state);
 
-extern enum vchiq_status
+extern int
 vchiq_use_internal(struct vchiq_state *state, struct vchiq_service *service,
                   enum USE_TYPE_E use_type);
-extern enum vchiq_status
+extern int
 vchiq_release_internal(struct vchiq_state *state,
                       struct vchiq_service *service);
 
index 8c42e76620333122d3e377c3f81089982b194c9c..92ed1213fe37988e537926ed104e93c0b312d96d 100644 (file)
@@ -172,6 +172,7 @@ static const struct attribute_group fivr_attribute_group = {
 RFIM_SHOW(rfi_restriction_run_busy, 1)
 RFIM_SHOW(rfi_restriction_err_code, 1)
 RFIM_SHOW(rfi_restriction_data_rate, 1)
+RFIM_SHOW(rfi_restriction_data_rate_base, 1)
 RFIM_SHOW(ddr_data_rate_point_0, 1)
 RFIM_SHOW(ddr_data_rate_point_1, 1)
 RFIM_SHOW(ddr_data_rate_point_2, 1)
@@ -181,11 +182,13 @@ RFIM_SHOW(rfi_disable, 1)
 RFIM_STORE(rfi_restriction_run_busy, 1)
 RFIM_STORE(rfi_restriction_err_code, 1)
 RFIM_STORE(rfi_restriction_data_rate, 1)
+RFIM_STORE(rfi_restriction_data_rate_base, 1)
 RFIM_STORE(rfi_disable, 1)
 
 static DEVICE_ATTR_RW(rfi_restriction_run_busy);
 static DEVICE_ATTR_RW(rfi_restriction_err_code);
 static DEVICE_ATTR_RW(rfi_restriction_data_rate);
+static DEVICE_ATTR_RW(rfi_restriction_data_rate_base);
 static DEVICE_ATTR_RO(ddr_data_rate_point_0);
 static DEVICE_ATTR_RO(ddr_data_rate_point_1);
 static DEVICE_ATTR_RO(ddr_data_rate_point_2);
@@ -248,6 +251,7 @@ static struct attribute *dvfs_attrs[] = {
        &dev_attr_rfi_restriction_run_busy.attr,
        &dev_attr_rfi_restriction_err_code.attr,
        &dev_attr_rfi_restriction_data_rate.attr,
+       &dev_attr_rfi_restriction_data_rate_base.attr,
        &dev_attr_ddr_data_rate_point_0.attr,
        &dev_attr_ddr_data_rate_point_1.attr,
        &dev_attr_ddr_data_rate_point_2.attr,
index f17ab2316dbd0f36b79d89d10852d0bf683d0170..77bd47d976a21c237298b24e948b1535db4037b7 100644 (file)
@@ -909,15 +909,20 @@ __thermal_cooling_device_register(struct device_node *np,
        cdev->devdata = devdata;
 
        ret = cdev->ops->get_max_state(cdev, &cdev->max_state);
-       if (ret)
-               goto out_kfree_type;
+       if (ret) {
+               kfree(cdev->type);
+               goto out_ida_remove;
+       }
 
        thermal_cooling_device_setup_sysfs(cdev);
+
        ret = dev_set_name(&cdev->device, "cooling_device%d", cdev->id);
        if (ret) {
+               kfree(cdev->type);
                thermal_cooling_device_destroy_sysfs(cdev);
-               goto out_kfree_type;
+               goto out_ida_remove;
        }
+
        ret = device_register(&cdev->device);
        if (ret)
                goto out_kfree_type;
@@ -943,6 +948,8 @@ out_kfree_type:
        thermal_cooling_device_destroy_sysfs(cdev);
        kfree(cdev->type);
        put_device(&cdev->device);
+
+       /* thermal_release() takes care of the rest */
        cdev = NULL;
 out_ida_remove:
        ida_free(&thermal_cdev_ida, id);
index 81252e31014a18aa907c158572e6b50108a12cc7..56008eb91e2e448b9d584c318d6bc55c9af00a5b 100644 (file)
@@ -427,13 +427,6 @@ int tb_retimer_scan(struct tb_port *port, bool add)
 {
        u32 status[TB_MAX_RETIMER_INDEX + 1] = {};
        int ret, i, last_idx = 0;
-       struct usb4_port *usb4;
-
-       usb4 = port->usb4;
-       if (!usb4)
-               return 0;
-
-       pm_runtime_get_sync(&usb4->dev);
 
        /*
         * Send broadcast RT to make sure retimer indices facing this
@@ -441,7 +434,7 @@ int tb_retimer_scan(struct tb_port *port, bool add)
         */
        ret = usb4_port_enumerate_retimers(port);
        if (ret)
-               goto out;
+               return ret;
 
        /*
         * Enable sideband channel for each retimer. We can do this
@@ -471,12 +464,11 @@ int tb_retimer_scan(struct tb_port *port, bool add)
                        break;
        }
 
-       if (!last_idx) {
-               ret = 0;
-               goto out;
-       }
+       if (!last_idx)
+               return 0;
 
        /* Add on-board retimers if they do not exist already */
+       ret = 0;
        for (i = 1; i <= last_idx; i++) {
                struct tb_retimer *rt;
 
@@ -490,10 +482,6 @@ int tb_retimer_scan(struct tb_port *port, bool add)
                }
        }
 
-out:
-       pm_runtime_mark_last_busy(&usb4->dev);
-       pm_runtime_put_autosuspend(&usb4->dev);
-
        return ret;
 }
 
index 4628458044270afca638a14d8d22f3403b1a1137..3f1ab30c4fb1589029d6a98f60f844e97a72a9a8 100644 (file)
@@ -628,11 +628,15 @@ static void tb_scan_port(struct tb_port *port)
                         * Downstream switch is reachable through two ports.
                         * Only scan on the primary port (link_nr == 0).
                         */
+
+       if (port->usb4)
+               pm_runtime_get_sync(&port->usb4->dev);
+
        if (tb_wait_for_port(port, false) <= 0)
-               return;
+               goto out_rpm_put;
        if (port->remote) {
                tb_port_dbg(port, "port already has a remote\n");
-               return;
+               goto out_rpm_put;
        }
 
        tb_retimer_scan(port, true);
@@ -647,12 +651,12 @@ static void tb_scan_port(struct tb_port *port)
                 */
                if (PTR_ERR(sw) == -EIO || PTR_ERR(sw) == -EADDRNOTAVAIL)
                        tb_scan_xdomain(port);
-               return;
+               goto out_rpm_put;
        }
 
        if (tb_switch_configure(sw)) {
                tb_switch_put(sw);
-               return;
+               goto out_rpm_put;
        }
 
        /*
@@ -681,7 +685,7 @@ static void tb_scan_port(struct tb_port *port)
 
        if (tb_switch_add(sw)) {
                tb_switch_put(sw);
-               return;
+               goto out_rpm_put;
        }
 
        /* Link the switches using both links if available */
@@ -733,6 +737,12 @@ static void tb_scan_port(struct tb_port *port)
 
        tb_add_dp_resources(sw);
        tb_scan_switch(sw);
+
+out_rpm_put:
+       if (port->usb4) {
+               pm_runtime_mark_last_busy(&port->usb4->dev);
+               pm_runtime_put_autosuspend(&port->usb4->dev);
+       }
 }
 
 static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel)
index 2c3cf7fc3357150d139002b7c29b55a5c1a23809..1fc3c29b24f8397a540c914fb3533342baf9a630 100644 (file)
@@ -1275,7 +1275,7 @@ static void tb_usb3_reclaim_available_bandwidth(struct tb_tunnel *tunnel,
                return;
        } else if (!ret) {
                /* Use maximum link rate if the link valid is not set */
-               ret = usb4_usb3_port_max_link_rate(tunnel->src_port);
+               ret = tb_usb3_max_link_rate(tunnel->dst_port, tunnel->src_port);
                if (ret < 0) {
                        tb_tunnel_warn(tunnel, "failed to read maximum link rate\n");
                        return;
index cfa83486c9dad6892f9b8b0c3ddf14c434ac9ae5..3c51e47dd86b77786ff47a19617b06da13b5e8b4 100644 (file)
@@ -1419,12 +1419,19 @@ static int tb_xdomain_get_properties(struct tb_xdomain *xd)
         * registered, we notify the userspace that it has changed.
         */
        if (!update) {
-               struct tb_port *port;
+               /*
+                * Now disable lane 1 if bonding was not enabled. Do
+                * this only if bonding was possible at the beginning
+                * (that is we are the connection manager and there are
+                * two lanes).
+                */
+               if (xd->bonding_possible) {
+                       struct tb_port *port;
 
-               /* Now disable lane 1 if bonding was not enabled */
-               port = tb_port_at(xd->route, tb_xdomain_parent(xd));
-               if (!port->bonded)
-                       tb_port_disable(port->dual_link_port);
+                       port = tb_port_at(xd->route, tb_xdomain_parent(xd));
+                       if (!port->bonded)
+                               tb_port_disable(port->dual_link_port);
+               }
 
                if (device_add(&xd->dev)) {
                        dev_err(&xd->dev, "failed to add XDomain device\n");
index 7c23112dc923f7dc7069a63a16ed2696e533d81f..5bddb2f5e93185eca993595dd0c44f7cc7d1ec46 100644 (file)
@@ -52,17 +52,22 @@ static DEFINE_SPINLOCK(xencons_lock);
 
 static struct xencons_info *vtermno_to_xencons(int vtermno)
 {
-       struct xencons_info *entry, *n, *ret = NULL;
+       struct xencons_info *entry, *ret = NULL;
+       unsigned long flags;
 
-       if (list_empty(&xenconsoles))
-                       return NULL;
+       spin_lock_irqsave(&xencons_lock, flags);
+       if (list_empty(&xenconsoles)) {
+               spin_unlock_irqrestore(&xencons_lock, flags);
+               return NULL;
+       }
 
-       list_for_each_entry_safe(entry, n, &xenconsoles, list) {
+       list_for_each_entry(entry, &xenconsoles, list) {
                if (entry->vtermno == vtermno) {
                        ret  = entry;
                        break;
                }
        }
+       spin_unlock_irqrestore(&xencons_lock, flags);
 
        return ret;
 }
@@ -223,7 +228,7 @@ static int xen_hvm_console_init(void)
 {
        int r;
        uint64_t v = 0;
-       unsigned long gfn;
+       unsigned long gfn, flags;
        struct xencons_info *info;
 
        if (!xen_hvm_domain())
@@ -258,9 +263,9 @@ static int xen_hvm_console_init(void)
                goto err;
        info->vtermno = HVC_COOKIE;
 
-       spin_lock(&xencons_lock);
+       spin_lock_irqsave(&xencons_lock, flags);
        list_add_tail(&info->list, &xenconsoles);
-       spin_unlock(&xencons_lock);
+       spin_unlock_irqrestore(&xencons_lock, flags);
 
        return 0;
 err:
@@ -283,6 +288,7 @@ static int xencons_info_pv_init(struct xencons_info *info, int vtermno)
 static int xen_pv_console_init(void)
 {
        struct xencons_info *info;
+       unsigned long flags;
 
        if (!xen_pv_domain())
                return -ENODEV;
@@ -299,9 +305,9 @@ static int xen_pv_console_init(void)
                /* already configured */
                return 0;
        }
-       spin_lock(&xencons_lock);
+       spin_lock_irqsave(&xencons_lock, flags);
        xencons_info_pv_init(info, HVC_COOKIE);
-       spin_unlock(&xencons_lock);
+       spin_unlock_irqrestore(&xencons_lock, flags);
 
        return 0;
 }
@@ -309,6 +315,7 @@ static int xen_pv_console_init(void)
 static int xen_initial_domain_console_init(void)
 {
        struct xencons_info *info;
+       unsigned long flags;
 
        if (!xen_initial_domain())
                return -ENODEV;
@@ -323,9 +330,9 @@ static int xen_initial_domain_console_init(void)
        info->irq = bind_virq_to_irq(VIRQ_CONSOLE, 0, false);
        info->vtermno = HVC_COOKIE;
 
-       spin_lock(&xencons_lock);
+       spin_lock_irqsave(&xencons_lock, flags);
        list_add_tail(&info->list, &xenconsoles);
-       spin_unlock(&xencons_lock);
+       spin_unlock_irqrestore(&xencons_lock, flags);
 
        return 0;
 }
@@ -380,10 +387,12 @@ static void xencons_free(struct xencons_info *info)
 
 static int xen_console_remove(struct xencons_info *info)
 {
+       unsigned long flags;
+
        xencons_disconnect_backend(info);
-       spin_lock(&xencons_lock);
+       spin_lock_irqsave(&xencons_lock, flags);
        list_del(&info->list);
-       spin_unlock(&xencons_lock);
+       spin_unlock_irqrestore(&xencons_lock, flags);
        if (info->xbdev != NULL)
                xencons_free(info);
        else {
@@ -394,9 +403,9 @@ static int xen_console_remove(struct xencons_info *info)
        return 0;
 }
 
-static int xencons_remove(struct xenbus_device *dev)
+static void xencons_remove(struct xenbus_device *dev)
 {
-       return xen_console_remove(dev_get_drvdata(&dev->dev));
+       xen_console_remove(dev_get_drvdata(&dev->dev));
 }
 
 static int xencons_connect_backend(struct xenbus_device *dev,
@@ -464,6 +473,7 @@ static int xencons_probe(struct xenbus_device *dev,
 {
        int ret, devid;
        struct xencons_info *info;
+       unsigned long flags;
 
        devid = dev->nodename[strlen(dev->nodename) - 1] - '0';
        if (devid == 0)
@@ -482,9 +492,9 @@ static int xencons_probe(struct xenbus_device *dev,
        ret = xencons_connect_backend(dev, info);
        if (ret < 0)
                goto error;
-       spin_lock(&xencons_lock);
+       spin_lock_irqsave(&xencons_lock, flags);
        list_add_tail(&info->list, &xenconsoles);
-       spin_unlock(&xencons_lock);
+       spin_unlock_irqrestore(&xencons_lock, flags);
 
        return 0;
 
@@ -584,10 +594,12 @@ static int __init xen_hvc_init(void)
 
        info->hvc = hvc_alloc(HVC_COOKIE, info->irq, ops, 256);
        if (IS_ERR(info->hvc)) {
+               unsigned long flags;
+
                r = PTR_ERR(info->hvc);
-               spin_lock(&xencons_lock);
+               spin_lock_irqsave(&xencons_lock, flags);
                list_del(&info->list);
-               spin_unlock(&xencons_lock);
+               spin_unlock_irqrestore(&xencons_lock, flags);
                if (info->irq)
                        unbind_from_irqhandler(info->irq, NULL);
                kfree(info);
index 314a05e009df97f840928ce1510ebc905d6534b4..64770c62bbec5436d332821ffaf57074276b2422 100644 (file)
 #define PCI_DEVICE_ID_EXAR_XR17V4358           0x4358
 #define PCI_DEVICE_ID_EXAR_XR17V8358           0x8358
 
+#define PCI_DEVICE_ID_SEALEVEL_710xC           0x1001
+#define PCI_DEVICE_ID_SEALEVEL_720xC           0x1002
+#define PCI_DEVICE_ID_SEALEVEL_740xC           0x1004
+#define PCI_DEVICE_ID_SEALEVEL_780xC           0x1008
+#define PCI_DEVICE_ID_SEALEVEL_716xC           0x1010
+
 #define UART_EXAR_INT0         0x80
 #define UART_EXAR_8XMODE       0x88    /* 8X sampling rate select */
 #define UART_EXAR_SLEEP                0x8b    /* Sleep mode */
@@ -638,6 +644,8 @@ exar_pci_probe(struct pci_dev *pcidev, const struct pci_device_id *ent)
                nr_ports = BIT(((pcidev->device & 0x38) >> 3) - 1);
        else if (board->num_ports)
                nr_ports = board->num_ports;
+       else if (pcidev->vendor == PCI_VENDOR_ID_SEALEVEL)
+               nr_ports = pcidev->device & 0xff;
        else
                nr_ports = pcidev->device & 0x0f;
 
@@ -864,6 +872,12 @@ static const struct pci_device_id exar_pci_tbl[] = {
        EXAR_DEVICE(COMMTECH, 4224PCI335, pbn_fastcom335_4),
        EXAR_DEVICE(COMMTECH, 2324PCI335, pbn_fastcom335_4),
        EXAR_DEVICE(COMMTECH, 2328PCI335, pbn_fastcom335_8),
+
+       EXAR_DEVICE(SEALEVEL, 710xC, pbn_exar_XR17V35x),
+       EXAR_DEVICE(SEALEVEL, 720xC, pbn_exar_XR17V35x),
+       EXAR_DEVICE(SEALEVEL, 740xC, pbn_exar_XR17V35x),
+       EXAR_DEVICE(SEALEVEL, 780xC, pbn_exar_XR17V35x),
+       EXAR_DEVICE(SEALEVEL, 716xC, pbn_exar_XR17V35x),
        { 0, }
 };
 MODULE_DEVICE_TABLE(pci, exar_pci_tbl);
index d75c39f4622b7b6ae53f1f9bc053dd0c5476083b..d8c2f3455eebad589a712d2e8e6a4be91f3ddcf4 100644 (file)
@@ -1466,6 +1466,10 @@ static bool pl011_tx_chars(struct uart_amba_port *uap, bool from_irq)
        struct circ_buf *xmit = &uap->port.state->xmit;
        int count = uap->fifosize >> 1;
 
+       if ((uap->port.rs485.flags & SER_RS485_ENABLED) &&
+           !uap->rs485_tx_started)
+               pl011_rs485_tx_start(uap);
+
        if (uap->port.x_char) {
                if (!pl011_tx_char(uap, uap->port.x_char, from_irq))
                        return true;
@@ -1477,10 +1481,6 @@ static bool pl011_tx_chars(struct uart_amba_port *uap, bool from_irq)
                return false;
        }
 
-       if ((uap->port.rs485.flags & SER_RS485_ENABLED) &&
-           !uap->rs485_tx_started)
-               pl011_rs485_tx_start(uap);
-
        /* If we are using DMA mode, try to send some characters. */
        if (pl011_dma_tx_irq(uap))
                return true;
index f1c06e12efa0bb7e720c004fffcc51394e2fd885..9cd7479b03c086e6825218ae905f98a2d4b48f95 100644 (file)
@@ -2657,13 +2657,7 @@ static void __init atmel_console_get_options(struct uart_port *port, int *baud,
        else if (mr == ATMEL_US_PAR_ODD)
                *parity = 'o';
 
-       /*
-        * The serial core only rounds down when matching this to a
-        * supported baud rate. Make sure we don't end up slightly
-        * lower than one of those, as it would make us fall through
-        * to a much lower baud rate than we really want.
-        */
-       *baud = port->uartclk / (16 * (quot - 1));
+       *baud = port->uartclk / (16 * quot);
 }
 
 static int __init atmel_console_setup(struct console *co, char *options)
index a3ed9b34e2abb557374f91c64e23a1bad4cb16b2..7ce7bb1640054dd309e2feb45eea9083eb89705e 100644 (file)
@@ -171,6 +171,7 @@ static int configure_kgdboc(void)
        int err = -ENODEV;
        char *cptr = config;
        struct console *cons;
+       int cookie;
 
        if (!strlen(config) || isspace(config[0])) {
                err = 0;
@@ -189,20 +190,9 @@ static int configure_kgdboc(void)
        if (kgdboc_register_kbd(&cptr))
                goto do_register;
 
-       /*
-        * tty_find_polling_driver() can call uart_set_options()
-        * (via poll_init) to configure the uart. Take the console_list_lock
-        * in order to synchronize against register_console(), which can also
-        * configure the uart via uart_set_options(). This also allows safe
-        * traversal of the console list.
-        */
-       console_list_lock();
-
        p = tty_find_polling_driver(cptr, &tty_line);
-       if (!p) {
-               console_list_unlock();
+       if (!p)
                goto noconfig;
-       }
 
        /*
         * Take console_lock to serialize device() callback with
@@ -211,7 +201,8 @@ static int configure_kgdboc(void)
         */
        console_lock();
 
-       for_each_console(cons) {
+       cookie = console_srcu_read_lock();
+       for_each_console_srcu(cons) {
                int idx;
                if (cons->device && cons->device(cons, &idx) == p &&
                    idx == tty_line) {
@@ -219,11 +210,10 @@ static int configure_kgdboc(void)
                        break;
                }
        }
+       console_srcu_read_unlock(cookie);
 
        console_unlock();
 
-       console_list_unlock();
-
        kgdb_tty_driver = p;
        kgdb_tty_line = tty_line;
 
index 3d54a43768cd2497180920ce1103f34d1ad3cd14..9576ba8bbc40ec59aca15ddf3c279f784274944c 100644 (file)
@@ -749,7 +749,7 @@ static void pch_dma_tx_complete(void *arg)
                uart_xmit_advance(port, sg_dma_len(sg));
 
        async_tx_ack(priv->desc_tx);
-       dma_unmap_sg(port->dev, sg, priv->orig_nent, DMA_TO_DEVICE);
+       dma_unmap_sg(port->dev, priv->sg_tx_p, priv->orig_nent, DMA_TO_DEVICE);
        priv->tx_dma_use = 0;
        priv->nent = 0;
        priv->orig_nent = 0;
index b487823f0e61724ba8cdb38ff11f8e5c28bfbf6a..57f04f8bf5043206d23a8f93e4285c8dd8347d4e 100644 (file)
@@ -864,9 +864,10 @@ out_unlock:
        return IRQ_HANDLED;
 }
 
-static void get_tx_fifo_size(struct qcom_geni_serial_port *port)
+static int setup_fifos(struct qcom_geni_serial_port *port)
 {
        struct uart_port *uport;
+       u32 old_rx_fifo_depth = port->rx_fifo_depth;
 
        uport = &port->uport;
        port->tx_fifo_depth = geni_se_get_tx_fifo_depth(&port->se);
@@ -874,6 +875,16 @@ static void get_tx_fifo_size(struct qcom_geni_serial_port *port)
        port->rx_fifo_depth = geni_se_get_rx_fifo_depth(&port->se);
        uport->fifosize =
                (port->tx_fifo_depth * port->tx_fifo_width) / BITS_PER_BYTE;
+
+       if (port->rx_fifo && (old_rx_fifo_depth != port->rx_fifo_depth) && port->rx_fifo_depth) {
+               port->rx_fifo = devm_krealloc(uport->dev, port->rx_fifo,
+                                             port->rx_fifo_depth * sizeof(u32),
+                                             GFP_KERNEL);
+               if (!port->rx_fifo)
+                       return -ENOMEM;
+       }
+
+       return 0;
 }
 
 
@@ -888,6 +899,7 @@ static int qcom_geni_serial_port_setup(struct uart_port *uport)
        u32 rxstale = DEFAULT_BITS_PER_CHAR * STALE_TIMEOUT;
        u32 proto;
        u32 pin_swap;
+       int ret;
 
        proto = geni_se_read_proto(&port->se);
        if (proto != GENI_SE_UART) {
@@ -897,7 +909,9 @@ static int qcom_geni_serial_port_setup(struct uart_port *uport)
 
        qcom_geni_serial_stop_rx(uport);
 
-       get_tx_fifo_size(port);
+       ret = setup_fifos(port);
+       if (ret)
+               return ret;
 
        writel(rxstale, uport->membase + SE_UART_RX_STALE_CNT);
 
@@ -1516,7 +1530,7 @@ static int qcom_geni_serial_remove(struct platform_device *pdev)
        return 0;
 }
 
-static int __maybe_unused qcom_geni_serial_sys_suspend(struct device *dev)
+static int qcom_geni_serial_sys_suspend(struct device *dev)
 {
        struct qcom_geni_serial_port *port = dev_get_drvdata(dev);
        struct uart_port *uport = &port->uport;
@@ -1533,7 +1547,7 @@ static int __maybe_unused qcom_geni_serial_sys_suspend(struct device *dev)
        return uart_suspend_port(private_data->drv, uport);
 }
 
-static int __maybe_unused qcom_geni_serial_sys_resume(struct device *dev)
+static int qcom_geni_serial_sys_resume(struct device *dev)
 {
        int ret;
        struct qcom_geni_serial_port *port = dev_get_drvdata(dev);
@@ -1581,10 +1595,12 @@ static int qcom_geni_serial_sys_hib_resume(struct device *dev)
 }
 
 static const struct dev_pm_ops qcom_geni_serial_pm_ops = {
-       SET_SYSTEM_SLEEP_PM_OPS(qcom_geni_serial_sys_suspend,
-                                       qcom_geni_serial_sys_resume)
-       .restore = qcom_geni_serial_sys_hib_resume,
-       .thaw = qcom_geni_serial_sys_hib_resume,
+       .suspend = pm_sleep_ptr(qcom_geni_serial_sys_suspend),
+       .resume = pm_sleep_ptr(qcom_geni_serial_sys_resume),
+       .freeze = pm_sleep_ptr(qcom_geni_serial_sys_suspend),
+       .poweroff = pm_sleep_ptr(qcom_geni_serial_sys_suspend),
+       .restore = pm_sleep_ptr(qcom_geni_serial_sys_hib_resume),
+       .thaw = pm_sleep_ptr(qcom_geni_serial_sys_hib_resume),
 };
 
 static const struct of_device_id qcom_geni_serial_match_table[] = {
index b9fbbee598b815e8cd41527dc0acec65c8958710..ec874f3a567ca79bea6fd1d3503f5b78dfba4f8b 100644 (file)
@@ -2212,6 +2212,9 @@ EXPORT_SYMBOL_GPL(uart_parse_options);
  * @parity: parity character - 'n' (none), 'o' (odd), 'e' (even)
  * @bits: number of data bits
  * @flow: flow control character - 'r' (rts)
+ *
+ * Locking: Caller must hold console_list_lock in order to serialize
+ * early initialization of the serial-console lock.
  */
 int
 uart_set_options(struct uart_port *port, struct console *co,
@@ -2619,7 +2622,9 @@ static int uart_poll_init(struct tty_driver *driver, int line, char *options)
 
        if (!ret && options) {
                uart_parse_options(options, &baud, &parity, &bits, &flow);
+               console_list_lock();
                ret = uart_set_options(port, NULL, baud, parity, bits, flow);
+               console_list_unlock();
        }
 out:
        mutex_unlock(&tport->mutex);
index e18c9f4463ec592a70b95fa7e788e33286a86caf..bda61be5f0357f016ab82c6def893c4ef3071ada 100644 (file)
@@ -6056,6 +6056,14 @@ void ufshcd_schedule_eh_work(struct ufs_hba *hba)
        }
 }
 
+static void ufshcd_force_error_recovery(struct ufs_hba *hba)
+{
+       spin_lock_irq(hba->host->host_lock);
+       hba->force_reset = true;
+       ufshcd_schedule_eh_work(hba);
+       spin_unlock_irq(hba->host->host_lock);
+}
+
 static void ufshcd_clk_scaling_allow(struct ufs_hba *hba, bool allow)
 {
        down_write(&hba->clk_scaling_lock);
@@ -9083,6 +9091,15 @@ static int __ufshcd_wl_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 
                if (!hba->dev_info.b_rpm_dev_flush_capable) {
                        ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
+                       if (ret && pm_op != UFS_SHUTDOWN_PM) {
+                               /*
+                                * If return err in suspend flow, IO will hang.
+                                * Trigger error handler and break suspend for
+                                * error recovery.
+                                */
+                               ufshcd_force_error_recovery(hba);
+                               ret = -EBUSY;
+                       }
                        if (ret)
                                goto enable_scaling;
                }
@@ -9094,6 +9111,15 @@ static int __ufshcd_wl_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
         */
        check_for_bkops = !ufshcd_is_ufs_dev_deepsleep(hba);
        ret = ufshcd_link_state_transition(hba, req_link_state, check_for_bkops);
+       if (ret && pm_op != UFS_SHUTDOWN_PM) {
+               /*
+                * If return err in suspend flow, IO will hang.
+                * Trigger error handler and break suspend for
+                * error recovery.
+                */
+               ufshcd_force_error_recovery(hba);
+               ret = -EBUSY;
+       }
        if (ret)
                goto set_dev_active;
 
index 5adcb349718c33128b0970b6448080dc458f8b5a..ccfaebca6faa7512ac92ad0beeea239f467bbb52 100644 (file)
@@ -2614,6 +2614,7 @@ int cdns3_gadget_ep_dequeue(struct usb_ep *ep,
        u8 req_on_hw_ring = 0;
        unsigned long flags;
        int ret = 0;
+       int val;
 
        if (!ep || !request || !ep->desc)
                return -EINVAL;
@@ -2649,6 +2650,13 @@ found:
 
        /* Update ring only if removed request is on pending_req_list list */
        if (req_on_hw_ring && link_trb) {
+               /* Stop DMA */
+               writel(EP_CMD_DFLUSH, &priv_dev->regs->ep_cmd);
+
+               /* wait for DFLUSH cleared */
+               readl_poll_timeout_atomic(&priv_dev->regs->ep_cmd, val,
+                                         !(val & EP_CMD_DFLUSH), 1, 1000);
+
                link_trb->buffer = cpu_to_le32(TRB_BUFFER(priv_ep->trb_pool_dma +
                        ((priv_req->end_trb + 1) * TRB_SIZE)));
                link_trb->control = cpu_to_le32((le32_to_cpu(link_trb->control) & TRB_CYCLE) |
@@ -2660,6 +2668,10 @@ found:
 
        cdns3_gadget_giveback(priv_ep, priv_req, -ECONNRESET);
 
+       req = cdns3_next_request(&priv_ep->pending_req_list);
+       if (req)
+               cdns3_rearm_transfer(priv_ep, 1);
+
 not_found:
        spin_unlock_irqrestore(&priv_dev->lock, flags);
        return ret;
index 484b1cd234317076027c7c9986d8e769e54d552b..27c601296130e23f5b3d740d2b2944ccb2bbe586 100644 (file)
@@ -1294,12 +1294,12 @@ static void ci_extcon_wakeup_int(struct ci_hdrc *ci)
        cable_id = &ci->platdata->id_extcon;
        cable_vbus = &ci->platdata->vbus_extcon;
 
-       if ((!IS_ERR(cable_id->edev) || !IS_ERR(ci->role_switch))
+       if ((!IS_ERR(cable_id->edev) || ci->role_switch)
                && ci->is_otg &&
                (otgsc & OTGSC_IDIE) && (otgsc & OTGSC_IDIS))
                ci_irq(ci);
 
-       if ((!IS_ERR(cable_vbus->edev) || !IS_ERR(ci->role_switch))
+       if ((!IS_ERR(cable_vbus->edev) || ci->role_switch)
                && ci->is_otg &&
                (otgsc & OTGSC_BSVIE) && (otgsc & OTGSC_BSVIS))
                ci_irq(ci);
index 60e8174686a17c731a0a476b90ac8de4c6a15497..d7c8461976ce09133aeeaed474b826ce149738c2 100644 (file)
@@ -207,7 +207,7 @@ static int ulpi_read_id(struct ulpi *ulpi)
        /* Test the interface */
        ret = ulpi_write(ulpi, ULPI_SCRATCH, 0xaa);
        if (ret < 0)
-               return ret;
+               goto err;
 
        ret = ulpi_read(ulpi, ULPI_SCRATCH);
        if (ret < 0)
index 77e73fc8d6736b46a4665e63ec593b4fa96663ba..9eca403af2a85bf4541a74e69ebee75ddaa42c61 100644 (file)
@@ -44,6 +44,9 @@
 #define USB_PRODUCT_USB5534B                   0x5534
 #define USB_VENDOR_CYPRESS                     0x04b4
 #define USB_PRODUCT_CY7C65632                  0x6570
+#define USB_VENDOR_TEXAS_INSTRUMENTS           0x0451
+#define USB_PRODUCT_TUSB8041_USB3              0x8140
+#define USB_PRODUCT_TUSB8041_USB2              0x8142
 #define HUB_QUIRK_CHECK_PORT_AUTOSUSPEND       0x01
 #define HUB_QUIRK_DISABLE_AUTOSUSPEND          0x02
 
@@ -5854,6 +5857,16 @@ static const struct usb_device_id hub_id_table[] = {
       .idVendor = USB_VENDOR_GENESYS_LOGIC,
       .bInterfaceClass = USB_CLASS_HUB,
       .driver_info = HUB_QUIRK_CHECK_PORT_AUTOSUSPEND},
+    { .match_flags = USB_DEVICE_ID_MATCH_VENDOR
+                       | USB_DEVICE_ID_MATCH_PRODUCT,
+      .idVendor = USB_VENDOR_TEXAS_INSTRUMENTS,
+      .idProduct = USB_PRODUCT_TUSB8041_USB2,
+      .driver_info = HUB_QUIRK_DISABLE_AUTOSUSPEND},
+    { .match_flags = USB_DEVICE_ID_MATCH_VENDOR
+                       | USB_DEVICE_ID_MATCH_PRODUCT,
+      .idVendor = USB_VENDOR_TEXAS_INSTRUMENTS,
+      .idProduct = USB_PRODUCT_TUSB8041_USB3,
+      .driver_info = HUB_QUIRK_DISABLE_AUTOSUSPEND},
     { .match_flags = USB_DEVICE_ID_MATCH_DEV_CLASS,
       .bDeviceClass = USB_CLASS_HUB},
     { .match_flags = USB_DEVICE_ID_MATCH_INT_CLASS,
index 6d93428432f13f1e2cc8461570eebae935c4429c..533baa85083c2e6773f5b5b7483d20b05b782428 100644 (file)
@@ -37,6 +37,71 @@ bool usb_acpi_power_manageable(struct usb_device *hdev, int index)
 }
 EXPORT_SYMBOL_GPL(usb_acpi_power_manageable);
 
+#define UUID_USB_CONTROLLER_DSM "ce2ee385-00e6-48cb-9f05-2edb927c4899"
+#define USB_DSM_DISABLE_U1_U2_FOR_PORT 5
+
+/**
+ * usb_acpi_port_lpm_incapable - check if lpm should be disabled for a port.
+ * @hdev: USB device belonging to the usb hub
+ * @index: zero based port index
+ *
+ * Some USB3 ports may not support USB3 link power management U1/U2 states
+ * due to different retimer setup. ACPI provides _DSM method which returns 0x01
+ * if U1 and U2 states should be disabled. Evaluate _DSM with:
+ * Arg0: UUID = ce2ee385-00e6-48cb-9f05-2edb927c4899
+ * Arg1: Revision ID = 0
+ * Arg2: Function Index = 5
+ * Arg3: (empty)
+ *
+ * Return 1 if USB3 port is LPM incapable, negative on error, otherwise 0
+ */
+
+int usb_acpi_port_lpm_incapable(struct usb_device *hdev, int index)
+{
+       union acpi_object *obj;
+       acpi_handle port_handle;
+       int port1 = index + 1;
+       guid_t guid;
+       int ret;
+
+       ret = guid_parse(UUID_USB_CONTROLLER_DSM, &guid);
+       if (ret)
+               return ret;
+
+       port_handle = usb_get_hub_port_acpi_handle(hdev, port1);
+       if (!port_handle) {
+               dev_dbg(&hdev->dev, "port-%d no acpi handle\n", port1);
+               return -ENODEV;
+       }
+
+       if (!acpi_check_dsm(port_handle, &guid, 0,
+                           BIT(USB_DSM_DISABLE_U1_U2_FOR_PORT))) {
+               dev_dbg(&hdev->dev, "port-%d no _DSM function %d\n",
+                       port1, USB_DSM_DISABLE_U1_U2_FOR_PORT);
+               return -ENODEV;
+       }
+
+       obj = acpi_evaluate_dsm(port_handle, &guid, 0,
+                               USB_DSM_DISABLE_U1_U2_FOR_PORT, NULL);
+
+       if (!obj)
+               return -ENODEV;
+
+       if (obj->type != ACPI_TYPE_INTEGER) {
+               dev_dbg(&hdev->dev, "evaluate port-%d _DSM failed\n", port1);
+               ACPI_FREE(obj);
+               return -EINVAL;
+       }
+
+       if (obj->integer.value == 0x01)
+               ret = 1;
+
+       ACPI_FREE(obj);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(usb_acpi_port_lpm_incapable);
+
 /**
  * usb_acpi_set_power_state - control usb port's power via acpi power
  * resource
index b2f72b0e75c6bbfcd763778425ae841576e4373d..be954a9abbe05e8a18bfbbb1bb2b27869b661f41 100644 (file)
@@ -3,6 +3,7 @@
 config USB_DWC3
        tristate "DesignWare USB3 DRD Core Support"
        depends on (USB || USB_GADGET) && HAS_DMA
+       depends on (EXTCON || EXTCON=n)
        select USB_XHCI_PLATFORM if USB_XHCI_HCD
        select USB_ROLE_SWITCH if USB_DWC3_DUAL_ROLE
        help
@@ -44,7 +45,6 @@ config USB_DWC3_GADGET
 config USB_DWC3_DUAL_ROLE
        bool "Dual Role mode"
        depends on ((USB=y || USB=USB_DWC3) && (USB_GADGET=y || USB_GADGET=USB_DWC3))
-       depends on (EXTCON=y || EXTCON=USB_DWC3)
        help
          This is the default mode of working of DWC3 controller where
          both host and gadget features are enabled.
index 8607d4c23283c4db9d8afe8b00fdd75b97701bde..0745e9f11b2ef442cfb623f7d65fe42d52dd1e6c 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/of.h>
 #include <linux/platform_device.h>
 #include <linux/dma-mapping.h>
+#include <linux/gpio/consumer.h>
 #include <linux/of_gpio.h>
 #include <linux/of_platform.h>
 #include <linux/pm_runtime.h>
index 789976567f9f008c035e182841dbdf9d66757594..89dcfac01235ff092f326cfa0714d51f366df775 100644 (file)
@@ -1727,6 +1727,7 @@ static int __dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force, bool int
        else if (!ret)
                dep->flags |= DWC3_EP_END_TRANSFER_PENDING;
 
+       dep->flags &= ~DWC3_EP_DELAY_STOP;
        return ret;
 }
 
@@ -3732,8 +3733,10 @@ void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force,
        if (dep->number <= 1 && dwc->ep0state != EP0_DATA_PHASE)
                return;
 
+       if (interrupt && (dep->flags & DWC3_EP_DELAY_STOP))
+               return;
+
        if (!(dep->flags & DWC3_EP_TRANSFER_STARTED) ||
-           (dep->flags & DWC3_EP_DELAY_STOP) ||
            (dep->flags & DWC3_EP_END_TRANSFER_PENDING))
                return;
 
index 8a54edf921ac24d5a61aeb3f2c51be5f5d4ad878..ee740a6da463fb9be5f544483f0ddacd6de9ec8f 100644 (file)
@@ -144,10 +144,7 @@ static struct platform_driver fotg210_driver = {
 
 static int __init fotg210_init(void)
 {
-       if (usb_disabled())
-               return -ENODEV;
-
-       if (IS_ENABLED(CONFIG_USB_FOTG210_HCD))
+       if (IS_ENABLED(CONFIG_USB_FOTG210_HCD) && !usb_disabled())
                fotg210_hcd_init();
        return platform_driver_register(&fotg210_driver);
 }
index 66e1b7ee3346eda9e75baf6759c137717bc0dc21..87cca81bf4ac970fcdb902e5add8c0a68eacb0e4 100644 (file)
@@ -1201,6 +1201,8 @@ int fotg210_udc_probe(struct platform_device *pdev)
                dev_info(dev, "found and initialized PHY\n");
        }
 
+       ret = -ENOMEM;
+
        for (i = 0; i < FOTG210_MAX_NUM_EP; i++) {
                fotg210->ep[i] = kzalloc(sizeof(struct fotg210_ep), GFP_KERNEL);
                if (!fotg210->ep[i])
index 96121d1c8df4c71e0d4d0077fa0c37121ddd90ba..0853536cbf2e642c880743854ff79ab74714f756 100644 (file)
@@ -393,6 +393,7 @@ static void gadget_info_attr_release(struct config_item *item)
        WARN_ON(!list_empty(&gi->string_list));
        WARN_ON(!list_empty(&gi->available_func));
        kfree(gi->composite.gadget_driver.function);
+       kfree(gi->composite.gadget_driver.driver.name);
        kfree(gi);
 }
 
@@ -1572,7 +1573,6 @@ static const struct usb_gadget_driver configfs_driver_template = {
        .max_speed      = USB_SPEED_SUPER_PLUS,
        .driver = {
                .owner          = THIS_MODULE,
-               .name           = "configfs-gadget",
        },
        .match_existing_only = 1,
 };
@@ -1623,13 +1623,21 @@ static struct config_group *gadgets_make(
 
        gi->composite.gadget_driver = configfs_driver_template;
 
+       gi->composite.gadget_driver.driver.name = kasprintf(GFP_KERNEL,
+                                                           "configfs-gadget.%s", name);
+       if (!gi->composite.gadget_driver.driver.name)
+               goto err;
+
        gi->composite.gadget_driver.function = kstrdup(name, GFP_KERNEL);
        gi->composite.name = gi->composite.gadget_driver.function;
 
        if (!gi->composite.gadget_driver.function)
-               goto err;
+               goto out_free_driver_name;
 
        return &gi->group;
+
+out_free_driver_name:
+       kfree(gi->composite.gadget_driver.driver.name);
 err:
        kfree(gi);
        return ERR_PTR(-ENOMEM);
index 73dc10a77cdeadda5a3ef63b5e8d5be32959a3ff..523a961b910bb9e917d673b90ca583dde031122a 100644 (file)
@@ -279,6 +279,9 @@ static int __ffs_ep0_queue_wait(struct ffs_data *ffs, char *data, size_t len)
        struct usb_request *req = ffs->ep0req;
        int ret;
 
+       if (!req)
+               return -EINVAL;
+
        req->zero     = len < le16_to_cpu(ffs->ev.setup.wLength);
 
        spin_unlock_irq(&ffs->ev.waitq.lock);
@@ -1892,10 +1895,14 @@ static void functionfs_unbind(struct ffs_data *ffs)
        ENTER();
 
        if (!WARN_ON(!ffs->gadget)) {
+               /* dequeue before freeing ep0req */
+               usb_ep_dequeue(ffs->gadget->ep0, ffs->ep0req);
+               mutex_lock(&ffs->mutex);
                usb_ep_free_request(ffs->gadget->ep0, ffs->ep0req);
                ffs->ep0req = NULL;
                ffs->gadget = NULL;
                clear_bit(FFS_FL_BOUND, &ffs->flags);
+               mutex_unlock(&ffs->mutex);
                ffs_data_put(ffs);
        }
 }
index c36bcfa0e9b46f17e85e7731fe12ca1a28a769a9..424bb3b666dbd7d545a9d9abfb835c1152b51624 100644 (file)
@@ -83,7 +83,9 @@ static inline struct f_ncm *func_to_ncm(struct usb_function *f)
 /* peak (theoretical) bulk transfer rate in bits-per-second */
 static inline unsigned ncm_bitrate(struct usb_gadget *g)
 {
-       if (gadget_is_superspeed(g) && g->speed >= USB_SPEED_SUPER_PLUS)
+       if (!g)
+               return 0;
+       else if (gadget_is_superspeed(g) && g->speed >= USB_SPEED_SUPER_PLUS)
                return 4250000000U;
        else if (gadget_is_superspeed(g) && g->speed == USB_SPEED_SUPER)
                return 3750000000U;
index 01c3ead7d1b426a21eb23d1905005c8619682ff4..d605bc2e7e8fd43dd0725e221e22786d96e22bfd 100644 (file)
@@ -229,6 +229,7 @@ static void put_ep (struct ep_data *data)
  */
 
 static const char *CHIP;
+static DEFINE_MUTEX(sb_mutex);         /* Serialize superblock operations */
 
 /*----------------------------------------------------------------------*/
 
@@ -2010,13 +2011,20 @@ gadgetfs_fill_super (struct super_block *sb, struct fs_context *fc)
 {
        struct inode    *inode;
        struct dev_data *dev;
+       int             rc;
 
-       if (the_device)
-               return -ESRCH;
+       mutex_lock(&sb_mutex);
+
+       if (the_device) {
+               rc = -ESRCH;
+               goto Done;
+       }
 
        CHIP = usb_get_gadget_udc_name();
-       if (!CHIP)
-               return -ENODEV;
+       if (!CHIP) {
+               rc = -ENODEV;
+               goto Done;
+       }
 
        /* superblock */
        sb->s_blocksize = PAGE_SIZE;
@@ -2053,13 +2061,17 @@ gadgetfs_fill_super (struct super_block *sb, struct fs_context *fc)
         * from binding to a controller.
         */
        the_device = dev;
-       return 0;
+       rc = 0;
+       goto Done;
 
-Enomem:
+ Enomem:
        kfree(CHIP);
        CHIP = NULL;
+       rc = -ENOMEM;
 
-       return -ENOMEM;
+ Done:
+       mutex_unlock(&sb_mutex);
+       return rc;
 }
 
 /* "mount -t gadgetfs path /dev/gadget" ends up here */
@@ -2081,6 +2093,7 @@ static int gadgetfs_init_fs_context(struct fs_context *fc)
 static void
 gadgetfs_kill_sb (struct super_block *sb)
 {
+       mutex_lock(&sb_mutex);
        kill_litter_super (sb);
        if (the_device) {
                put_dev (the_device);
@@ -2088,6 +2101,7 @@ gadgetfs_kill_sb (struct super_block *sb)
        }
        kfree(CHIP);
        CHIP = NULL;
+       mutex_unlock(&sb_mutex);
 }
 
 /*----------------------------------------------------------------------*/
index 53e38f87472b040cc953ba2dfd65dcdfbe53f82f..c06dd1af7a0c506a17210bbc9ea41ac5355985d9 100644 (file)
@@ -293,6 +293,7 @@ static const struct uvc_descriptor_header * const uvc_fs_streaming_cls[] = {
        (const struct uvc_descriptor_header *) &uvc_format_yuv,
        (const struct uvc_descriptor_header *) &uvc_frame_yuv_360p,
        (const struct uvc_descriptor_header *) &uvc_frame_yuv_720p,
+       (const struct uvc_descriptor_header *) &uvc_color_matching,
        (const struct uvc_descriptor_header *) &uvc_format_mjpg,
        (const struct uvc_descriptor_header *) &uvc_frame_mjpg_360p,
        (const struct uvc_descriptor_header *) &uvc_frame_mjpg_720p,
@@ -305,6 +306,7 @@ static const struct uvc_descriptor_header * const uvc_hs_streaming_cls[] = {
        (const struct uvc_descriptor_header *) &uvc_format_yuv,
        (const struct uvc_descriptor_header *) &uvc_frame_yuv_360p,
        (const struct uvc_descriptor_header *) &uvc_frame_yuv_720p,
+       (const struct uvc_descriptor_header *) &uvc_color_matching,
        (const struct uvc_descriptor_header *) &uvc_format_mjpg,
        (const struct uvc_descriptor_header *) &uvc_frame_mjpg_360p,
        (const struct uvc_descriptor_header *) &uvc_frame_mjpg_720p,
@@ -317,6 +319,7 @@ static const struct uvc_descriptor_header * const uvc_ss_streaming_cls[] = {
        (const struct uvc_descriptor_header *) &uvc_format_yuv,
        (const struct uvc_descriptor_header *) &uvc_frame_yuv_360p,
        (const struct uvc_descriptor_header *) &uvc_frame_yuv_720p,
+       (const struct uvc_descriptor_header *) &uvc_color_matching,
        (const struct uvc_descriptor_header *) &uvc_format_mjpg,
        (const struct uvc_descriptor_header *) &uvc_frame_mjpg_360p,
        (const struct uvc_descriptor_header *) &uvc_frame_mjpg_720p,
index 9cea785934e596434a0ff380667f29771913168a..38d06e5abfbb35e6ba2c2912f9a613b6c792bf35 100644 (file)
@@ -29,7 +29,7 @@
 #include "ehci-fsl.h"
 
 #define DRIVER_DESC "Freescale EHCI Host controller driver"
-#define DRV_NAME "ehci-fsl"
+#define DRV_NAME "fsl-ehci"
 
 static struct hc_driver __read_mostly fsl_ehci_hc_driver;
 
index de1b091583183f5c86a90fab95e11eec97bebb4f..46fdab940092e334af942b88f4f08906d038118e 100644 (file)
@@ -1530,15 +1530,13 @@ static void xenhcd_backend_changed(struct xenbus_device *dev,
        }
 }
 
-static int xenhcd_remove(struct xenbus_device *dev)
+static void xenhcd_remove(struct xenbus_device *dev)
 {
        struct xenhcd_info *info = dev_get_drvdata(&dev->dev);
        struct usb_hcd *hcd = xenhcd_info_to_hcd(info);
 
        xenhcd_destroy_rings(info);
        usb_put_hcd(hcd);
-
-       return 0;
 }
 
 static int xenhcd_probe(struct xenbus_device *dev,
index 79d679b3e07607b26349e211f1fa37f8248bd0f9..fb988e4ea9244e160e7b2af6aabf01901b9b4d63 100644 (file)
@@ -78,9 +78,12 @@ static const char hcd_name[] = "xhci_hcd";
 static struct hc_driver __read_mostly xhci_pci_hc_driver;
 
 static int xhci_pci_setup(struct usb_hcd *hcd);
+static int xhci_pci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
+                                     struct usb_tt *tt, gfp_t mem_flags);
 
 static const struct xhci_driver_overrides xhci_pci_overrides __initconst = {
        .reset = xhci_pci_setup,
+       .update_hub_device = xhci_pci_update_hub_device,
 };
 
 /* called after powerup, by probe or system-pm "wakeup" */
@@ -352,8 +355,38 @@ static void xhci_pme_acpi_rtd3_enable(struct pci_dev *dev)
                                NULL);
        ACPI_FREE(obj);
 }
+
+static void xhci_find_lpm_incapable_ports(struct usb_hcd *hcd, struct usb_device *hdev)
+{
+       struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+       struct xhci_hub *rhub = &xhci->usb3_rhub;
+       int ret;
+       int i;
+
+       /* This is not the usb3 roothub we are looking for */
+       if (hcd != rhub->hcd)
+               return;
+
+       if (hdev->maxchild > rhub->num_ports) {
+               dev_err(&hdev->dev, "USB3 roothub port number mismatch\n");
+               return;
+       }
+
+       for (i = 0; i < hdev->maxchild; i++) {
+               ret = usb_acpi_port_lpm_incapable(hdev, i);
+
+               dev_dbg(&hdev->dev, "port-%d disable U1/U2 _DSM: %d\n", i + 1, ret);
+
+               if (ret >= 0) {
+                       rhub->ports[i]->lpm_incapable = ret;
+                       continue;
+               }
+       }
+}
+
 #else
 static void xhci_pme_acpi_rtd3_enable(struct pci_dev *dev) { }
+static void xhci_find_lpm_incapable_ports(struct usb_hcd *hcd, struct usb_device *hdev) { }
 #endif /* CONFIG_ACPI */
 
 /* called during probe() after chip reset completes */
@@ -386,6 +419,16 @@ static int xhci_pci_setup(struct usb_hcd *hcd)
        return xhci_pci_reinit(xhci, pdev);
 }
 
+static int xhci_pci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
+                                     struct usb_tt *tt, gfp_t mem_flags)
+{
+       /* Check if acpi claims some USB3 roothub ports are lpm incapable */
+       if (!hdev->parent)
+               xhci_find_lpm_incapable_ports(hcd, hdev);
+
+       return xhci_update_hub_device(hcd, hdev, tt, mem_flags);
+}
+
 /*
  * We need to register our own PCI probe function (instead of the USB core's
  * function) in order to create a second roothub under xHCI.
@@ -455,6 +498,8 @@ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
        if (xhci->quirks & XHCI_DEFAULT_PM_RUNTIME_ALLOW)
                pm_runtime_allow(&dev->dev);
 
+       dma_set_max_seg_size(&dev->dev, UINT_MAX);
+
        return 0;
 
 put_usb3_hcd:
index ddc30037f9cefa6f606f590ea1ed79325b0ba4da..f5b0e1ce22af201582708e6f6c1803935ce4e412 100644 (file)
@@ -1169,7 +1169,10 @@ static void xhci_kill_endpoint_urbs(struct xhci_hcd *xhci,
        struct xhci_virt_ep *ep;
        struct xhci_ring *ring;
 
-       ep = &xhci->devs[slot_id]->eps[ep_index];
+       ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
+       if (!ep)
+               return;
+
        if ((ep->ep_state & EP_HAS_STREAMS) ||
                        (ep->ep_state & EP_GETTING_NO_STREAMS)) {
                int stream_id;
index 79d7931c048a8cd785101adb064c2f0ea8908f2a..2b280beb001150c59f05d0fc627a6b0cc7d4e879 100644 (file)
@@ -3974,6 +3974,7 @@ static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
        struct xhci_virt_device *virt_dev;
        struct xhci_slot_ctx *slot_ctx;
+       unsigned long flags;
        int i, ret;
 
        /*
@@ -4000,7 +4001,11 @@ static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
                virt_dev->eps[i].ep_state &= ~EP_STOP_CMD_PENDING;
        virt_dev->udev = NULL;
        xhci_disable_slot(xhci, udev->slot_id);
+
+       spin_lock_irqsave(&xhci->lock, flags);
        xhci_free_virt_device(xhci, udev->slot_id);
+       spin_unlock_irqrestore(&xhci->lock, flags);
+
 }
 
 int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id)
@@ -5044,6 +5049,7 @@ static int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd,
                        struct usb_device *udev, enum usb3_link_state state)
 {
        struct xhci_hcd *xhci;
+       struct xhci_port *port;
        u16 hub_encoded_timeout;
        int mel;
        int ret;
@@ -5060,6 +5066,13 @@ static int xhci_enable_usb3_lpm_timeout(struct usb_hcd *hcd,
        if (xhci_check_tier_policy(xhci, udev, state) < 0)
                return USB3_LPM_DISABLED;
 
+       /* If connected to root port then check port can handle lpm */
+       if (udev->parent && !udev->parent->parent) {
+               port = xhci->usb3_rhub.ports[udev->portnum - 1];
+               if (port->lpm_incapable)
+                       return USB3_LPM_DISABLED;
+       }
+
        hub_encoded_timeout = xhci_calculate_lpm_timeout(hcd, udev, state);
        mel = calculate_max_exit_latency(udev, state, hub_encoded_timeout);
        if (mel < 0) {
@@ -5119,7 +5132,7 @@ static int xhci_disable_usb3_lpm_timeout(struct usb_hcd *hcd,
 /* Once a hub descriptor is fetched for a device, we need to update the xHC's
  * internal data structures for the device.
  */
-static int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
+int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
                        struct usb_tt *tt, gfp_t mem_flags)
 {
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
@@ -5219,6 +5232,7 @@ static int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
        xhci_free_command(xhci, config_cmd);
        return ret;
 }
+EXPORT_SYMBOL_GPL(xhci_update_hub_device);
 
 static int xhci_get_frame(struct usb_hcd *hcd)
 {
@@ -5502,6 +5516,8 @@ void xhci_init_driver(struct hc_driver *drv,
                        drv->check_bandwidth = over->check_bandwidth;
                if (over->reset_bandwidth)
                        drv->reset_bandwidth = over->reset_bandwidth;
+               if (over->update_hub_device)
+                       drv->update_hub_device = over->update_hub_device;
        }
 }
 EXPORT_SYMBOL_GPL(xhci_init_driver);
index c9f06c5e4e9d2a7795aba70c9099e6bce937134f..dcee7f3207add0bba0ebb9c9d449d0b2608e9ecc 100644 (file)
@@ -1735,6 +1735,7 @@ struct xhci_port {
        int                     hcd_portnum;
        struct xhci_hub         *rhub;
        struct xhci_port_cap    *port_cap;
+       unsigned int            lpm_incapable:1;
 };
 
 struct xhci_hub {
@@ -1943,6 +1944,8 @@ struct xhci_driver_overrides {
                             struct usb_host_endpoint *ep);
        int (*check_bandwidth)(struct usb_hcd *, struct usb_device *);
        void (*reset_bandwidth)(struct usb_hcd *, struct usb_device *);
+       int (*update_hub_device)(struct usb_hcd *hcd, struct usb_device *hdev,
+                           struct usb_tt *tt, gfp_t mem_flags);
 };
 
 #define        XHCI_CFC_DELAY          10
@@ -2122,6 +2125,8 @@ int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
                       struct usb_host_endpoint *ep);
 int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
 void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
+int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
+                          struct usb_tt *tt, gfp_t mem_flags);
 int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id);
 int xhci_ext_cap_init(struct xhci_hcd *xhci);
 
index f9427a67789cf7e26a1c95ecb28f17087ad30d42..1e3df27bab58fd1cb2be079dab3bbdca43e6a51a 100644 (file)
@@ -814,7 +814,7 @@ static int iowarrior_probe(struct usb_interface *interface,
                        break;
 
                case USB_DEVICE_ID_CODEMERCS_IOW100:
-                       dev->report_size = 13;
+                       dev->report_size = 12;
                        break;
                }
        }
index 94e7966e199d19a2cf27e7f41de6b179d105d8d2..969c4c4f2ae92dbdb074f356dcbc688e1e1aa0f2 100644 (file)
 
 #include "onboard_usb_hub.h"
 
+static void onboard_hub_attach_usb_driver(struct work_struct *work);
+
 static struct usb_device_driver onboard_hub_usbdev_driver;
+static DECLARE_WORK(attach_usb_driver_work, onboard_hub_attach_usb_driver);
 
 /************************** Platform driver **************************/
 
@@ -45,7 +48,6 @@ struct onboard_hub {
        bool is_powered_on;
        bool going_away;
        struct list_head udev_list;
-       struct work_struct attach_usb_driver_work;
        struct mutex lock;
 };
 
@@ -271,8 +273,7 @@ static int onboard_hub_probe(struct platform_device *pdev)
         * This needs to be done deferred to avoid self-deadlocks on systems
         * with nested onboard hubs.
         */
-       INIT_WORK(&hub->attach_usb_driver_work, onboard_hub_attach_usb_driver);
-       schedule_work(&hub->attach_usb_driver_work);
+       schedule_work(&attach_usb_driver_work);
 
        return 0;
 }
@@ -285,9 +286,6 @@ static int onboard_hub_remove(struct platform_device *pdev)
 
        hub->going_away = true;
 
-       if (&hub->attach_usb_driver_work != current_work())
-               cancel_work_sync(&hub->attach_usb_driver_work);
-
        mutex_lock(&hub->lock);
 
        /* unbind the USB devices to avoid dangling references to this device */
@@ -433,13 +431,13 @@ static int __init onboard_hub_init(void)
 {
        int ret;
 
-       ret = platform_driver_register(&onboard_hub_driver);
+       ret = usb_register_device_driver(&onboard_hub_usbdev_driver, THIS_MODULE);
        if (ret)
                return ret;
 
-       ret = usb_register_device_driver(&onboard_hub_usbdev_driver, THIS_MODULE);
+       ret = platform_driver_register(&onboard_hub_driver);
        if (ret)
-               platform_driver_unregister(&onboard_hub_driver);
+               usb_deregister_device_driver(&onboard_hub_usbdev_driver);
 
        return ret;
 }
@@ -449,6 +447,8 @@ static void __exit onboard_hub_exit(void)
 {
        usb_deregister_device_driver(&onboard_hub_usbdev_driver);
        platform_driver_unregister(&onboard_hub_driver);
+
+       cancel_work_sync(&attach_usb_driver_work);
 }
 module_exit(onboard_hub_exit);
 
index 476f55d1fec3084a33ecd586855d626e01ad4caf..44a21ec865fb219ec80eb34e66c46bc1ee7c6265 100644 (file)
@@ -411,8 +411,10 @@ static int omap2430_probe(struct platform_device *pdev)
                memset(musb_res, 0, sizeof(*musb_res) * ARRAY_SIZE(musb_res));
 
                res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-               if (!res)
+               if (!res) {
+                       ret = -EINVAL;
                        goto err2;
+               }
 
                musb_res[i].start = res->start;
                musb_res[i].end = res->end;
index 67372acc23529da1cb5c4e0b2f1e07ca15d7dee6..832ad592b7ef3bed8411922c9fec294591ea0756 100644 (file)
@@ -60,6 +60,7 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x0846, 0x1100) }, /* NetGear Managed Switch M4100 series, M5300 series, M7100 series */
        { USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */
        { USB_DEVICE(0x08FD, 0x000A) }, /* Digianswer A/S , ZigBee/802.15.4 MAC Device */
+       { USB_DEVICE(0x0908, 0x0070) }, /* Siemens SCALANCE LPE-9000 USB Serial Console */
        { USB_DEVICE(0x0908, 0x01FF) }, /* Siemens RUGGEDCOM USB Serial Console */
        { USB_DEVICE(0x0988, 0x0578) }, /* Teraoka AD2000 */
        { USB_DEVICE(0x0B00, 0x3070) }, /* Ingenico 3070 */
index dee79c7d82d5ce475f67477e8ed68709065ff16a..ee5ac4ef7e1620cf0471ccf2f34b4ee6d3768dc9 100644 (file)
@@ -255,10 +255,16 @@ static void option_instat_callback(struct urb *urb);
 #define QUECTEL_PRODUCT_EP06                   0x0306
 #define QUECTEL_PRODUCT_EM05G                  0x030a
 #define QUECTEL_PRODUCT_EM060K                 0x030b
+#define QUECTEL_PRODUCT_EM05G_CS               0x030c
+#define QUECTEL_PRODUCT_EM05CN_SG              0x0310
 #define QUECTEL_PRODUCT_EM05G_SG               0x0311
+#define QUECTEL_PRODUCT_EM05CN                 0x0312
+#define QUECTEL_PRODUCT_EM05G_GR               0x0313
+#define QUECTEL_PRODUCT_EM05G_RS               0x0314
 #define QUECTEL_PRODUCT_EM12                   0x0512
 #define QUECTEL_PRODUCT_RM500Q                 0x0800
 #define QUECTEL_PRODUCT_RM520N                 0x0801
+#define QUECTEL_PRODUCT_EC200U                 0x0901
 #define QUECTEL_PRODUCT_EC200S_CN              0x6002
 #define QUECTEL_PRODUCT_EC200T                 0x6026
 #define QUECTEL_PRODUCT_RM500K                 0x7001
@@ -1159,8 +1165,18 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0xff, 0xff),
          .driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 },
        { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0, 0) },
+       { USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM05CN, 0xff),
+         .driver_info = RSVD(6) | ZLP },
+       { USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM05CN_SG, 0xff),
+         .driver_info = RSVD(6) | ZLP },
        { USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM05G, 0xff),
          .driver_info = RSVD(6) | ZLP },
+       { USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM05G_CS, 0xff),
+         .driver_info = RSVD(6) | ZLP },
+       { USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM05G_GR, 0xff),
+         .driver_info = RSVD(6) | ZLP },
+       { USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM05G_RS, 0xff),
+         .driver_info = RSVD(6) | ZLP },
        { USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM05G_SG, 0xff),
          .driver_info = RSVD(6) | ZLP },
        { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K, 0xff, 0x00, 0x40) },
@@ -1180,6 +1196,7 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM520N, 0xff, 0xff, 0x30) },
        { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM520N, 0xff, 0, 0x40) },
        { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM520N, 0xff, 0, 0) },
+       { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200U, 0xff, 0, 0) },
        { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200S_CN, 0xff, 0, 0) },
        { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200T, 0xff, 0, 0) },
        { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500K, 0xff, 0x00, 0x00) },
index 3f720faa6f97c1d313ba45c48822b7c7d6aa7496..d73282c0ec50174e7c37d85a0253fe11abaa55b4 100644 (file)
@@ -116,6 +116,19 @@ static int uas_use_uas_driver(struct usb_interface *intf,
        if (le16_to_cpu(udev->descriptor.idVendor) == 0x0bc2)
                flags |= US_FL_NO_ATA_1X;
 
+       /*
+        * RTL9210-based enclosure from HIKSEMI, MD202 reportedly have issues
+        * with UAS.  This isn't distinguishable with just idVendor and
+        * idProduct, use manufacturer and product too.
+        *
+        * Reported-by: Hongling Zeng <zenghongling@kylinos.cn>
+        */
+       if (le16_to_cpu(udev->descriptor.idVendor) == 0x0bda &&
+                       le16_to_cpu(udev->descriptor.idProduct) == 0x9210 &&
+                       (udev->manufacturer && !strcmp(udev->manufacturer, "HIKSEMI")) &&
+                       (udev->product && !strcmp(udev->product, "MD202")))
+               flags |= US_FL_IGNORE_UAS;
+
        usb_stor_adjust_quirks(udev, &flags);
 
        if (flags & US_FL_IGNORE_UAS) {
index 251778d14e2dd41f848286fa5d2ea50c93c40e61..c7b763d6d102394aba9080cfc104db9cb2b3c9c3 100644 (file)
@@ -83,13 +83,6 @@ UNUSUAL_DEV(0x0bc2, 0x331a, 0x0000, 0x9999,
                USB_SC_DEVICE, USB_PR_DEVICE, NULL,
                US_FL_NO_REPORT_LUNS),
 
-/* Reported-by: Hongling Zeng <zenghongling@kylinos.cn> */
-UNUSUAL_DEV(0x0bda, 0x9210, 0x0000, 0x9999,
-               "Hiksemi",
-               "External HDD",
-               USB_SC_DEVICE, USB_PR_DEVICE, NULL,
-               US_FL_IGNORE_UAS),
-
 /* Reported-by: Benjamin Tissoires <benjamin.tissoires@redhat.com> */
 UNUSUAL_DEV(0x13fd, 0x3940, 0x0000, 0x9999,
                "Initio Corporation",
index de66a2949e33b2b2f029056af449228b851904f6..9a6860285fbec44342a45dee74e84af9a4dd2ad3 100644 (file)
@@ -419,6 +419,18 @@ static const char * const pin_assignments[] = {
        [DP_PIN_ASSIGN_F] = "F",
 };
 
+/*
+ * Helper function to extract a peripheral's currently supported
+ * Pin Assignments from its DisplayPort alternate mode state.
+ */
+static u8 get_current_pin_assignments(struct dp_altmode *dp)
+{
+       if (DP_CONF_CURRENTLY(dp->data.conf) == DP_CONF_UFP_U_AS_DFP_D)
+               return DP_CAP_PIN_ASSIGN_DFP_D(dp->alt->vdo);
+       else
+               return DP_CAP_PIN_ASSIGN_UFP_D(dp->alt->vdo);
+}
+
 static ssize_t
 pin_assignment_store(struct device *dev, struct device_attribute *attr,
                     const char *buf, size_t size)
@@ -445,10 +457,7 @@ pin_assignment_store(struct device *dev, struct device_attribute *attr,
                goto out_unlock;
        }
 
-       if (DP_CONF_CURRENTLY(dp->data.conf) == DP_CONF_DFP_D)
-               assignments = DP_CAP_UFP_D_PIN_ASSIGN(dp->alt->vdo);
-       else
-               assignments = DP_CAP_DFP_D_PIN_ASSIGN(dp->alt->vdo);
+       assignments = get_current_pin_assignments(dp);
 
        if (!(DP_CONF_GET_PIN_ASSIGN(conf) & assignments)) {
                ret = -EINVAL;
@@ -485,10 +494,7 @@ static ssize_t pin_assignment_show(struct device *dev,
 
        cur = get_count_order(DP_CONF_GET_PIN_ASSIGN(dp->data.conf));
 
-       if (DP_CONF_CURRENTLY(dp->data.conf) == DP_CONF_DFP_D)
-               assignments = DP_CAP_UFP_D_PIN_ASSIGN(dp->alt->vdo);
-       else
-               assignments = DP_CAP_DFP_D_PIN_ASSIGN(dp->alt->vdo);
+       assignments = get_current_pin_assignments(dp);
 
        for (i = 0; assignments; assignments >>= 1, i++) {
                if (assignments & 1) {
index 904c7b4ce2f0c278ed5f36f5d7ccb91ac02ff685..59b366b5c6144728d7637ffca353d93c351b8e2b 100644 (file)
@@ -4594,14 +4594,13 @@ static void run_state_machine(struct tcpm_port *port)
                tcpm_set_state(port, ready_state(port), 0);
                break;
        case DR_SWAP_CHANGE_DR:
-               if (port->data_role == TYPEC_HOST) {
-                       tcpm_unregister_altmodes(port);
+               tcpm_unregister_altmodes(port);
+               if (port->data_role == TYPEC_HOST)
                        tcpm_set_roles(port, true, port->pwr_role,
                                       TYPEC_DEVICE);
-               } else {
+               else
                        tcpm_set_roles(port, true, port->pwr_role,
                                       TYPEC_HOST);
-               }
                tcpm_ams_finish(port);
                tcpm_set_state(port, ready_state(port), 0);
                break;
index eabe519013e7850837c56d60f718d55a27f07fc3..1292241d581a67829c6dcbf98dc5c32257d33aea 100644 (file)
@@ -187,6 +187,7 @@ EXPORT_SYMBOL_GPL(ucsi_send_command);
 
 struct ucsi_work {
        struct delayed_work work;
+       struct list_head node;
        unsigned long delay;
        unsigned int count;
        struct ucsi_connector *con;
@@ -202,6 +203,7 @@ static void ucsi_poll_worker(struct work_struct *work)
        mutex_lock(&con->lock);
 
        if (!con->partner) {
+               list_del(&uwork->node);
                mutex_unlock(&con->lock);
                kfree(uwork);
                return;
@@ -209,10 +211,12 @@ static void ucsi_poll_worker(struct work_struct *work)
 
        ret = uwork->cb(con);
 
-       if (uwork->count-- && (ret == -EBUSY || ret == -ETIMEDOUT))
+       if (uwork->count-- && (ret == -EBUSY || ret == -ETIMEDOUT)) {
                queue_delayed_work(con->wq, &uwork->work, uwork->delay);
-       else
+       } else {
+               list_del(&uwork->node);
                kfree(uwork);
+       }
 
        mutex_unlock(&con->lock);
 }
@@ -236,6 +240,7 @@ static int ucsi_partner_task(struct ucsi_connector *con,
        uwork->con = con;
        uwork->cb = cb;
 
+       list_add_tail(&uwork->node, &con->partner_tasks);
        queue_delayed_work(con->wq, &uwork->work, delay);
 
        return 0;
@@ -1056,6 +1061,7 @@ static int ucsi_register_port(struct ucsi *ucsi, int index)
        INIT_WORK(&con->work, ucsi_handle_connector_change);
        init_completion(&con->complete);
        mutex_init(&con->lock);
+       INIT_LIST_HEAD(&con->partner_tasks);
        con->num = index + 1;
        con->ucsi = ucsi;
 
@@ -1420,8 +1426,20 @@ void ucsi_unregister(struct ucsi *ucsi)
                ucsi_unregister_altmodes(&ucsi->connector[i],
                                         UCSI_RECIPIENT_CON);
                ucsi_unregister_port_psy(&ucsi->connector[i]);
-               if (ucsi->connector[i].wq)
+
+               if (ucsi->connector[i].wq) {
+                       struct ucsi_work *uwork;
+
+                       mutex_lock(&ucsi->connector[i].lock);
+                       /*
+                        * queue delayed items immediately so they can execute
+                        * and free themselves before the wq is destroyed
+                        */
+                       list_for_each_entry(uwork, &ucsi->connector[i].partner_tasks, node)
+                               mod_delayed_work(ucsi->connector[i].wq, &uwork->work, 0);
+                       mutex_unlock(&ucsi->connector[i].lock);
                        destroy_workqueue(ucsi->connector[i].wq);
+               }
                typec_unregister_port(ucsi->connector[i].port);
        }
 
index c968474ee547396fa64d4469b5805e37ed18efb4..60ce9fb6e7450f5c29168393e31ef78fe23c875e 100644 (file)
@@ -322,6 +322,7 @@ struct ucsi_connector {
        struct work_struct work;
        struct completion complete;
        struct workqueue_struct *wq;
+       struct list_head partner_tasks;
 
        struct typec_port *port;
        struct typec_partner *partner;
index 6af9fdbb86b7a4c40b4c7d8f7ac1df070b0e5932..058fbe28107e9e2740c698ad862a1b1b747c8cc1 100644 (file)
@@ -116,8 +116,9 @@ int mlx5_vdpa_create_mkey(struct mlx5_vdpa_dev *mvdev, u32 *mkey, u32 *in,
                          int inlen);
 int mlx5_vdpa_destroy_mkey(struct mlx5_vdpa_dev *mvdev, u32 mkey);
 int mlx5_vdpa_handle_set_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
-                            bool *change_map);
-int mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb);
+                            bool *change_map, unsigned int asid);
+int mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
+                       unsigned int asid);
 void mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev);
 
 #define mlx5_vdpa_warn(__dev, format, ...)                                                         \
index a639b9208d4148b045210bcb687556cd7ba602ff..0a1e0b0dc37e3d2264fa9d2b4713463a95afb833 100644 (file)
@@ -311,7 +311,6 @@ static int add_direct_chain(struct mlx5_vdpa_dev *mvdev, u64 start, u64 size, u8
        u64 st;
        u64 sz;
        int err;
-       int i = 0;
 
        st = start;
        while (size) {
@@ -336,7 +335,6 @@ static int add_direct_chain(struct mlx5_vdpa_dev *mvdev, u64 start, u64 size, u8
                mr->num_directs++;
                mr->num_klms++;
                st += sz;
-               i++;
        }
        list_splice_tail(&tmp, &mr->head);
        return 0;
@@ -511,7 +509,8 @@ out:
        mutex_unlock(&mr->mkey_mtx);
 }
 
-static int _mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb)
+static int _mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev,
+                               struct vhost_iotlb *iotlb, unsigned int asid)
 {
        struct mlx5_vdpa_mr *mr = &mvdev->mr;
        int err;
@@ -519,42 +518,49 @@ static int _mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb
        if (mr->initialized)
                return 0;
 
-       if (iotlb)
-               err = create_user_mr(mvdev, iotlb);
-       else
-               err = create_dma_mr(mvdev, mr);
+       if (mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP] == asid) {
+               if (iotlb)
+                       err = create_user_mr(mvdev, iotlb);
+               else
+                       err = create_dma_mr(mvdev, mr);
 
-       if (err)
-               return err;
+               if (err)
+                       return err;
+       }
 
-       err = dup_iotlb(mvdev, iotlb);
-       if (err)
-               goto out_err;
+       if (mvdev->group2asid[MLX5_VDPA_CVQ_GROUP] == asid) {
+               err = dup_iotlb(mvdev, iotlb);
+               if (err)
+                       goto out_err;
+       }
 
        mr->initialized = true;
        return 0;
 
 out_err:
-       if (iotlb)
-               destroy_user_mr(mvdev, mr);
-       else
-               destroy_dma_mr(mvdev, mr);
+       if (mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP] == asid) {
+               if (iotlb)
+                       destroy_user_mr(mvdev, mr);
+               else
+                       destroy_dma_mr(mvdev, mr);
+       }
 
        return err;
 }
 
-int mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb)
+int mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
+                       unsigned int asid)
 {
        int err;
 
        mutex_lock(&mvdev->mr.mkey_mtx);
-       err = _mlx5_vdpa_create_mr(mvdev, iotlb);
+       err = _mlx5_vdpa_create_mr(mvdev, iotlb, asid);
        mutex_unlock(&mvdev->mr.mkey_mtx);
        return err;
 }
 
 int mlx5_vdpa_handle_set_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
-                            bool *change_map)
+                            bool *change_map, unsigned int asid)
 {
        struct mlx5_vdpa_mr *mr = &mvdev->mr;
        int err = 0;
@@ -566,7 +572,7 @@ int mlx5_vdpa_handle_set_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *io
                *change_map = true;
        }
        if (!*change_map)
-               err = _mlx5_vdpa_create_mr(mvdev, iotlb);
+               err = _mlx5_vdpa_create_mr(mvdev, iotlb, asid);
        mutex_unlock(&mr->mkey_mtx);
 
        return err;
index 90913365def43de809ba8c9ef4de1675c656816a..3a6dbbc6440d45f1bd3ac91f024578a643902894 100644 (file)
@@ -1468,11 +1468,13 @@ static int mlx5_vdpa_add_mac_vlan_rules(struct mlx5_vdpa_net *ndev, u8 *mac,
        dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v, outer_headers.dmac_47_16);
        eth_broadcast_addr(dmac_c);
        ether_addr_copy(dmac_v, mac);
-       MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
+       if (ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VLAN)) {
+               MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
+               MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, first_vid);
+       }
        if (tagged) {
                MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);
-               MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, first_vid);
-               MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid, vid);
+               MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, vid);
        }
        flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
        dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
@@ -1684,7 +1686,7 @@ static virtio_net_ctrl_ack handle_ctrl_mac(struct mlx5_vdpa_dev *mvdev, u8 cmd)
 
                /* Need recreate the flow table entry, so that the packet could forward back
                 */
-               mac_vlan_del(ndev, ndev->config.mac, 0, false);
+               mac_vlan_del(ndev, mac_back, 0, false);
 
                if (mac_vlan_add(ndev, ndev->config.mac, 0, false)) {
                        mlx5_vdpa_warn(mvdev, "failed to insert forward rules, try to restore\n");
@@ -1821,6 +1823,9 @@ static virtio_net_ctrl_ack handle_ctrl_vlan(struct mlx5_vdpa_dev *mvdev, u8 cmd)
        size_t read;
        u16 id;
 
+       if (!(ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VLAN)))
+               return status;
+
        switch (cmd) {
        case VIRTIO_NET_CTRL_VLAN_ADD:
                read = vringh_iov_pull_iotlb(&cvq->vring, &cvq->riov, &vlan, sizeof(vlan));
@@ -2389,7 +2394,8 @@ static void restore_channels_info(struct mlx5_vdpa_net *ndev)
        }
 }
 
-static int mlx5_vdpa_change_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb)
+static int mlx5_vdpa_change_map(struct mlx5_vdpa_dev *mvdev,
+                               struct vhost_iotlb *iotlb, unsigned int asid)
 {
        struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
        int err;
@@ -2401,7 +2407,7 @@ static int mlx5_vdpa_change_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb
 
        teardown_driver(ndev);
        mlx5_vdpa_destroy_mr(mvdev);
-       err = mlx5_vdpa_create_mr(mvdev, iotlb);
+       err = mlx5_vdpa_create_mr(mvdev, iotlb, asid);
        if (err)
                goto err_mr;
 
@@ -2582,7 +2588,7 @@ static int mlx5_vdpa_reset(struct vdpa_device *vdev)
        ++mvdev->generation;
 
        if (MLX5_CAP_GEN(mvdev->mdev, umem_uid_0)) {
-               if (mlx5_vdpa_create_mr(mvdev, NULL))
+               if (mlx5_vdpa_create_mr(mvdev, NULL, 0))
                        mlx5_vdpa_warn(mvdev, "create MR failed\n");
        }
        up_write(&ndev->reslock);
@@ -2618,41 +2624,20 @@ static u32 mlx5_vdpa_get_generation(struct vdpa_device *vdev)
        return mvdev->generation;
 }
 
-static int set_map_control(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb)
-{
-       u64 start = 0ULL, last = 0ULL - 1;
-       struct vhost_iotlb_map *map;
-       int err = 0;
-
-       spin_lock(&mvdev->cvq.iommu_lock);
-       vhost_iotlb_reset(mvdev->cvq.iotlb);
-
-       for (map = vhost_iotlb_itree_first(iotlb, start, last); map;
-            map = vhost_iotlb_itree_next(map, start, last)) {
-               err = vhost_iotlb_add_range(mvdev->cvq.iotlb, map->start,
-                                           map->last, map->addr, map->perm);
-               if (err)
-                       goto out;
-       }
-
-out:
-       spin_unlock(&mvdev->cvq.iommu_lock);
-       return err;
-}
-
-static int set_map_data(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb)
+static int set_map_data(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
+                       unsigned int asid)
 {
        bool change_map;
        int err;
 
-       err = mlx5_vdpa_handle_set_map(mvdev, iotlb, &change_map);
+       err = mlx5_vdpa_handle_set_map(mvdev, iotlb, &change_map, asid);
        if (err) {
                mlx5_vdpa_warn(mvdev, "set map failed(%d)\n", err);
                return err;
        }
 
        if (change_map)
-               err = mlx5_vdpa_change_map(mvdev, iotlb);
+               err = mlx5_vdpa_change_map(mvdev, iotlb, asid);
 
        return err;
 }
@@ -2665,16 +2650,7 @@ static int mlx5_vdpa_set_map(struct vdpa_device *vdev, unsigned int asid,
        int err = -EINVAL;
 
        down_write(&ndev->reslock);
-       if (mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP] == asid) {
-               err = set_map_data(mvdev, iotlb);
-               if (err)
-                       goto out;
-       }
-
-       if (mvdev->group2asid[MLX5_VDPA_CVQ_GROUP] == asid)
-               err = set_map_control(mvdev, iotlb);
-
-out:
+       err = set_map_data(mvdev, iotlb, asid);
        up_write(&ndev->reslock);
        return err;
 }
@@ -2840,8 +2816,8 @@ static int mlx5_vdpa_suspend(struct vdpa_device *vdev)
        int i;
 
        down_write(&ndev->reslock);
-       mlx5_notifier_unregister(mvdev->mdev, &ndev->nb);
        ndev->nb_registered = false;
+       mlx5_notifier_unregister(mvdev->mdev, &ndev->nb);
        flush_workqueue(ndev->mvdev.wq);
        for (i = 0; i < ndev->cur_num_vqs; i++) {
                mvq = &ndev->vqs[i];
@@ -3019,7 +2995,7 @@ static void update_carrier(struct work_struct *work)
        else
                ndev->config.status &= cpu_to_mlx5vdpa16(mvdev, ~VIRTIO_NET_S_LINK_UP);
 
-       if (ndev->config_cb.callback)
+       if (ndev->nb_registered && ndev->config_cb.callback)
                ndev->config_cb.callback(ndev->config_cb.private);
 
        kfree(wqent);
@@ -3036,21 +3012,13 @@ static int event_handler(struct notifier_block *nb, unsigned long event, void *p
                switch (eqe->sub_type) {
                case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
                case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
-                       down_read(&ndev->reslock);
-                       if (!ndev->nb_registered) {
-                               up_read(&ndev->reslock);
-                               return NOTIFY_DONE;
-                       }
                        wqent = kzalloc(sizeof(*wqent), GFP_ATOMIC);
-                       if (!wqent) {
-                               up_read(&ndev->reslock);
+                       if (!wqent)
                                return NOTIFY_DONE;
-                       }
 
                        wqent->mvdev = &ndev->mvdev;
                        INIT_WORK(&wqent->work, update_carrier);
                        queue_work(ndev->mvdev.wq, &wqent->work);
-                       up_read(&ndev->reslock);
                        ret = NOTIFY_OK;
                        break;
                default:
@@ -3185,7 +3153,7 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
                goto err_mpfs;
 
        if (MLX5_CAP_GEN(mvdev->mdev, umem_uid_0)) {
-               err = mlx5_vdpa_create_mr(mvdev, NULL);
+               err = mlx5_vdpa_create_mr(mvdev, NULL, 0);
                if (err)
                        goto err_res;
        }
@@ -3237,8 +3205,8 @@ static void mlx5_vdpa_dev_del(struct vdpa_mgmt_dev *v_mdev, struct vdpa_device *
        struct workqueue_struct *wq;
 
        if (ndev->nb_registered) {
-               mlx5_notifier_unregister(mvdev->mdev, &ndev->nb);
                ndev->nb_registered = false;
+               mlx5_notifier_unregister(mvdev->mdev, &ndev->nb);
        }
        wq = mvdev->wq;
        mvdev->wq = NULL;
index febdc99b51a7b782d801a6564860a3ee091f1e0a..8ef7aa1365cc52a1844101e4019d755548a75396 100644 (file)
@@ -855,7 +855,7 @@ static int vdpa_dev_net_config_fill(struct vdpa_device *vdev, struct sk_buff *ms
 
        features_device = vdev->config->get_device_features(vdev);
 
-       if (nla_put_u64_64bit(msg, VDPA_ATTR_VDPA_DEV_SUPPORTED_FEATURES, features_device,
+       if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_FEATURES, features_device,
                              VDPA_ATTR_PAD))
                return -EMSGSIZE;
 
@@ -935,7 +935,6 @@ static int vdpa_fill_stats_rec(struct vdpa_device *vdev, struct sk_buff *msg,
 {
        struct virtio_net_config config = {};
        u64 features;
-       u16 max_vqp;
        u8 status;
        int err;
 
@@ -946,15 +945,15 @@ static int vdpa_fill_stats_rec(struct vdpa_device *vdev, struct sk_buff *msg,
        }
        vdpa_get_config_unlocked(vdev, 0, &config, sizeof(config));
 
-       max_vqp = __virtio16_to_cpu(true, config.max_virtqueue_pairs);
-       if (nla_put_u16(msg, VDPA_ATTR_DEV_NET_CFG_MAX_VQP, max_vqp))
-               return -EMSGSIZE;
-
        features = vdev->config->get_driver_features(vdev);
        if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_NEGOTIATED_FEATURES,
                              features, VDPA_ATTR_PAD))
                return -EMSGSIZE;
 
+       err = vdpa_dev_net_mq_config_fill(msg, features, &config);
+       if (err)
+               return err;
+
        if (nla_put_u32(msg, VDPA_ATTR_DEV_QUEUE_INDEX, index))
                return -EMSGSIZE;
 
index b071f0d842fbade2db1ab9a2d10dc9dd03cb80ee..cb88891b44a8c4218c244b39333cbcfa8faee8df 100644 (file)
@@ -67,8 +67,7 @@ static void vdpasim_queue_ready(struct vdpasim *vdpasim, unsigned int idx)
 {
        struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
 
-       vringh_init_iotlb(&vq->vring, vdpasim->dev_attr.supported_features,
-                         VDPASIM_QUEUE_MAX, false,
+       vringh_init_iotlb(&vq->vring, vdpasim->features, vq->num, false,
                          (struct vring_desc *)(uintptr_t)vq->desc_addr,
                          (struct vring_avail *)
                          (uintptr_t)vq->driver_addr,
@@ -690,7 +689,9 @@ static void vdpasim_free(struct vdpa_device *vdpa)
        }
 
        kvfree(vdpasim->buffer);
-       vhost_iotlb_free(vdpasim->iommu);
+       for (i = 0; i < vdpasim->dev_attr.nas; i++)
+               vhost_iotlb_reset(&vdpasim->iommu[i]);
+       kfree(vdpasim->iommu);
        kfree(vdpasim->vqs);
        kfree(vdpasim->config);
 }
index c6db1a1baf768594f43cf265b51530bb8880db12..f745926237a88dc34176cfde834368dfc31ddf13 100644 (file)
@@ -427,8 +427,10 @@ static int __init vdpasim_blk_init(void)
        int ret;
 
        ret = device_register(&vdpasim_blk_mgmtdev);
-       if (ret)
+       if (ret) {
+               put_device(&vdpasim_blk_mgmtdev);
                return ret;
+       }
 
        ret = vdpa_mgmtdev_register(&mgmt_dev);
        if (ret)
index c3cb225ea4693af4fa59269000c150c7b0376c25..584b975a98a7ef3c8e05e0411292f546b536ec81 100644 (file)
@@ -62,6 +62,9 @@ static bool receive_filter(struct vdpasim *vdpasim, size_t len)
        if (len < ETH_ALEN + hdr_len)
                return false;
 
+       if (is_broadcast_ether_addr(vdpasim->buffer + hdr_len) ||
+           is_multicast_ether_addr(vdpasim->buffer + hdr_len))
+               return true;
        if (!strncmp(vdpasim->buffer + hdr_len, vio_config->mac, ETH_ALEN))
                return true;
 
@@ -305,8 +308,10 @@ static int __init vdpasim_net_init(void)
        int ret;
 
        ret = device_register(&vdpasim_net_mgmtdev);
-       if (ret)
+       if (ret) {
+               put_device(&vdpasim_net_mgmtdev);
                return ret;
+       }
 
        ret = vdpa_mgmtdev_register(&mgmt_dev);
        if (ret)
index 0dd3c1f291da30cb8be62ac7742a841195ad22dd..0c3b48616a9f360fb0cd8167540cf813f2828c35 100644 (file)
@@ -1440,6 +1440,9 @@ static bool vduse_validate_config(struct vduse_dev_config *config)
        if (config->config_size > PAGE_SIZE)
                return false;
 
+       if (config->vq_num > 0xffff)
+               return false;
+
        if (!device_is_allowed(config->device_id))
                return false;
 
index d448db0c4de3f81810bc94008beea0345e202458..8fe267ca3e76f2483501030aa838b9773321bac6 100644 (file)
@@ -647,7 +647,7 @@ static void vp_vdpa_remove(struct pci_dev *pdev)
        mdev = vp_vdpa_mgtdev->mdev;
        vp_modern_remove(mdev);
        vdpa_mgmtdev_unregister(&vp_vdpa_mgtdev->mgtdev);
-       kfree(&vp_vdpa_mgtdev->mgtdev.id_table);
+       kfree(vp_vdpa_mgtdev->mgtdev.id_table);
        kfree(mdev);
        kfree(vp_vdpa_mgtdev);
 }
index 166044642fd5cc268c867bb78ba91c351b9e3b3c..ec32f785dfdec1012f71ff73f3ca623d89a2f5b8 100644 (file)
@@ -65,6 +65,10 @@ static DEFINE_IDA(vhost_vdpa_ida);
 
 static dev_t vhost_vdpa_major;
 
+static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v,
+                                  struct vhost_iotlb *iotlb, u64 start,
+                                  u64 last, u32 asid);
+
 static inline u32 iotlb_to_asid(struct vhost_iotlb *iotlb)
 {
        struct vhost_vdpa_as *as = container_of(iotlb, struct
@@ -135,7 +139,7 @@ static int vhost_vdpa_remove_as(struct vhost_vdpa *v, u32 asid)
                return -EINVAL;
 
        hlist_del(&as->hash_link);
-       vhost_iotlb_reset(&as->iotlb);
+       vhost_vdpa_iotlb_unmap(v, &as->iotlb, 0ULL, 0ULL - 1, asid);
        kfree(as);
 
        return 0;
@@ -683,10 +687,20 @@ static long vhost_vdpa_unlocked_ioctl(struct file *filep,
        mutex_unlock(&d->mutex);
        return r;
 }
+static void vhost_vdpa_general_unmap(struct vhost_vdpa *v,
+                                    struct vhost_iotlb_map *map, u32 asid)
+{
+       struct vdpa_device *vdpa = v->vdpa;
+       const struct vdpa_config_ops *ops = vdpa->config;
+       if (ops->dma_map) {
+               ops->dma_unmap(vdpa, asid, map->start, map->size);
+       } else if (ops->set_map == NULL) {
+               iommu_unmap(v->domain, map->start, map->size);
+       }
+}
 
-static void vhost_vdpa_pa_unmap(struct vhost_vdpa *v,
-                               struct vhost_iotlb *iotlb,
-                               u64 start, u64 last)
+static void vhost_vdpa_pa_unmap(struct vhost_vdpa *v, struct vhost_iotlb *iotlb,
+                               u64 start, u64 last, u32 asid)
 {
        struct vhost_dev *dev = &v->vdev;
        struct vhost_iotlb_map *map;
@@ -703,13 +717,13 @@ static void vhost_vdpa_pa_unmap(struct vhost_vdpa *v,
                        unpin_user_page(page);
                }
                atomic64_sub(PFN_DOWN(map->size), &dev->mm->pinned_vm);
+               vhost_vdpa_general_unmap(v, map, asid);
                vhost_iotlb_map_free(iotlb, map);
        }
 }
 
-static void vhost_vdpa_va_unmap(struct vhost_vdpa *v,
-                               struct vhost_iotlb *iotlb,
-                               u64 start, u64 last)
+static void vhost_vdpa_va_unmap(struct vhost_vdpa *v, struct vhost_iotlb *iotlb,
+                               u64 start, u64 last, u32 asid)
 {
        struct vhost_iotlb_map *map;
        struct vdpa_map_file *map_file;
@@ -718,20 +732,21 @@ static void vhost_vdpa_va_unmap(struct vhost_vdpa *v,
                map_file = (struct vdpa_map_file *)map->opaque;
                fput(map_file->file);
                kfree(map_file);
+               vhost_vdpa_general_unmap(v, map, asid);
                vhost_iotlb_map_free(iotlb, map);
        }
 }
 
 static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v,
-                                  struct vhost_iotlb *iotlb,
-                                  u64 start, u64 last)
+                                  struct vhost_iotlb *iotlb, u64 start,
+                                  u64 last, u32 asid)
 {
        struct vdpa_device *vdpa = v->vdpa;
 
        if (vdpa->use_va)
-               return vhost_vdpa_va_unmap(v, iotlb, start, last);
+               return vhost_vdpa_va_unmap(v, iotlb, start, last, asid);
 
-       return vhost_vdpa_pa_unmap(v, iotlb, start, last);
+       return vhost_vdpa_pa_unmap(v, iotlb, start, last, asid);
 }
 
 static int perm_to_iommu_flags(u32 perm)
@@ -798,17 +813,12 @@ static void vhost_vdpa_unmap(struct vhost_vdpa *v,
        const struct vdpa_config_ops *ops = vdpa->config;
        u32 asid = iotlb_to_asid(iotlb);
 
-       vhost_vdpa_iotlb_unmap(v, iotlb, iova, iova + size - 1);
+       vhost_vdpa_iotlb_unmap(v, iotlb, iova, iova + size - 1, asid);
 
-       if (ops->dma_map) {
-               ops->dma_unmap(vdpa, asid, iova, size);
-       } else if (ops->set_map) {
+       if (ops->set_map) {
                if (!v->in_batch)
                        ops->set_map(vdpa, asid, iotlb);
-       } else {
-               iommu_unmap(v->domain, iova, size);
        }
-
        /* If we are in the middle of batch processing, delay the free
         * of AS until BATCH_END.
         */
@@ -1162,14 +1172,14 @@ static void vhost_vdpa_cleanup(struct vhost_vdpa *v)
        struct vhost_vdpa_as *as;
        u32 asid;
 
-       vhost_dev_cleanup(&v->vdev);
-       kfree(v->vdev.vqs);
-
        for (asid = 0; asid < v->vdpa->nas; asid++) {
                as = asid_to_as(v, asid);
                if (as)
                        vhost_vdpa_remove_as(v, asid);
        }
+
+       vhost_dev_cleanup(&v->vdev);
+       kfree(v->vdev.vqs);
 }
 
 static int vhost_vdpa_open(struct inode *inode, struct file *filep)
index 5c9fe3c9c3646529961fa187b2eebedf7a532c88..cbe72bfd2f1faa11132931511bf8b466b481789f 100644 (file)
@@ -2053,7 +2053,7 @@ static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
        struct vhost_dev *dev = vq->dev;
        struct vhost_iotlb *umem = dev->iotlb ? dev->iotlb : dev->umem;
        struct iovec *_iov;
-       u64 s = 0;
+       u64 s = 0, last = addr + len - 1;
        int ret = 0;
 
        while ((u64)len > s) {
@@ -2063,7 +2063,7 @@ static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
                        break;
                }
 
-               map = vhost_iotlb_itree_first(umem, addr, addr + len - 1);
+               map = vhost_iotlb_itree_first(umem, addr, last);
                if (map == NULL || map->start > addr) {
                        if (umem != dev->iotlb) {
                                ret = -EFAULT;
index c9f5c8ea3afbd175246bb97f7dbf84d85df55fa2..33eb941fcf154631eb32b01b42ef8ba80dc4ef34 100644 (file)
@@ -1102,7 +1102,7 @@ static int iotlb_translate(const struct vringh *vrh,
        struct vhost_iotlb_map *map;
        struct vhost_iotlb *iotlb = vrh->iotlb;
        int ret = 0;
-       u64 s = 0;
+       u64 s = 0, last = addr + len - 1;
 
        spin_lock(vrh->iotlb_lock);
 
@@ -1114,8 +1114,7 @@ static int iotlb_translate(const struct vringh *vrh,
                        break;
                }
 
-               map = vhost_iotlb_itree_first(iotlb, addr,
-                                             addr + len - 1);
+               map = vhost_iotlb_itree_first(iotlb, addr, last);
                if (!map || map->start > addr) {
                        ret = -EINVAL;
                        break;
index cd6f7776013ac790efc85986188b35d27e8310b0..a2b3743723639cbbe8088c7c9ab420cdfb46a363 100644 (file)
@@ -959,7 +959,14 @@ static int __init vhost_vsock_init(void)
                                  VSOCK_TRANSPORT_F_H2G);
        if (ret < 0)
                return ret;
-       return misc_register(&vhost_vsock_misc);
+
+       ret = misc_register(&vhost_vsock_misc);
+       if (ret) {
+               vsock_core_unregister(&vhost_transport.transport);
+               return ret;
+       }
+
+       return 0;
 };
 
 static void __exit vhost_vsock_exit(void)
index df6e09f7d242226a229b605b3623aad8a0d7fbdf..b2bed599e6c6ea1c8014e537cf0f1d9151fc20d1 100644 (file)
@@ -456,8 +456,8 @@ config FB_ATARI
          chipset found in Ataris.
 
 config FB_OF
-       bool "Open Firmware frame buffer device support"
-       depends on (FB = y) && PPC && (!PPC_PSERIES || PCI)
+       tristate "Open Firmware frame buffer device support"
+       depends on FB && PPC && (!PPC_PSERIES || PCI)
        depends on !DRM_OFDRM
        select APERTURE_HELPERS
        select FB_CFB_FILLRECT
index 0ccf5d401ecbccb239b039991ae94188a5f507fb..d59215a4992e0a0d0a3ae7d09517fa0bca28ab95 100644 (file)
@@ -3192,8 +3192,7 @@ static void aty_init_lcd(struct atyfb_par *par, u32 bios_base)
                 * which we print to the screen.
                 */
                id = *(u8 *)par->lcd_table;
-               strncpy(model, (char *)par->lcd_table+1, 24);
-               model[23] = 0;
+               strscpy(model, (char *)par->lcd_table+1, sizeof(model));
 
                width = par->lcd_width = *(u16 *)(par->lcd_table+25);
                height = par->lcd_height = *(u16 *)(par->lcd_table+27);
index 0d3cee7ae7268c61f444dd8bf2f5401983e38a77..a043a737ea9f79a71fc34e4fca50ed449640612b 100644 (file)
@@ -1378,8 +1378,8 @@ static struct video_board vbG200 = {
        .lowlevel = &matrox_G100
 };
 static struct video_board vbG200eW = {
-       .maxvram = 0x100000,
-       .maxdisplayable = 0x800000,
+       .maxvram = 0x1000000,
+       .maxdisplayable = 0x0800000,
        .accelID = FB_ACCEL_MATROX_MGAG200,
        .lowlevel = &matrox_G100
 };
index 17cda576568382b041f379f91df5cb12504a0881..1f3df2055ff0d5670cc8b1b79a25937413f79a88 100644 (file)
@@ -1447,7 +1447,7 @@ static int fbinfo_init(struct omapfb_device *fbdev, struct fb_info *info)
        info->fbops = &omapfb_ops;
        info->flags = FBINFO_FLAG_DEFAULT;
 
-       strncpy(fix->id, MODULE_NAME, sizeof(fix->id));
+       strscpy(fix->id, MODULE_NAME, sizeof(fix->id));
 
        info->pseudo_palette = fbdev->pseudo_palette;
 
@@ -1573,8 +1573,7 @@ static int omapfb_find_ctrl(struct omapfb_device *fbdev)
 
        fbdev->ctrl = NULL;
 
-       strncpy(name, conf->lcd.ctrl_name, sizeof(name) - 1);
-       name[sizeof(name) - 1] = '\0';
+       strscpy(name, conf->lcd.ctrl_name, sizeof(name));
 
        if (strcmp(name, "internal") == 0) {
                fbdev->ctrl = fbdev->int_ctrl;
index 54b0f034c2edfdf0c0fa2a50a911646eeefd70e9..7cddb7b8ae3445bdc8932dc22670fba8a1eb89e6 100644 (file)
@@ -1536,22 +1536,28 @@ static void dsi_dump_dsidev_irqs(struct platform_device *dsidev,
 {
        struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
        unsigned long flags;
-       struct dsi_irq_stats stats;
+       struct dsi_irq_stats *stats;
+
+       stats = kzalloc(sizeof(*stats), GFP_KERNEL);
+       if (!stats) {
+               seq_printf(s, "out of memory\n");
+               return;
+       }
 
        spin_lock_irqsave(&dsi->irq_stats_lock, flags);
 
-       stats = dsi->irq_stats;
+       *stats = dsi->irq_stats;
        memset(&dsi->irq_stats, 0, sizeof(dsi->irq_stats));
        dsi->irq_stats.last_reset = jiffies;
 
        spin_unlock_irqrestore(&dsi->irq_stats_lock, flags);
 
        seq_printf(s, "period %u ms\n",
-                       jiffies_to_msecs(jiffies - stats.last_reset));
+                       jiffies_to_msecs(jiffies - stats->last_reset));
 
-       seq_printf(s, "irqs %d\n", stats.irq_count);
+       seq_printf(s, "irqs %d\n", stats->irq_count);
 #define PIS(x) \
-       seq_printf(s, "%-20s %10d\n", #x, stats.dsi_irqs[ffs(DSI_IRQ_##x)-1])
+       seq_printf(s, "%-20s %10d\n", #x, stats->dsi_irqs[ffs(DSI_IRQ_##x)-1])
 
        seq_printf(s, "-- DSI%d interrupts --\n", dsi->module_id + 1);
        PIS(VC0);
@@ -1575,10 +1581,10 @@ static void dsi_dump_dsidev_irqs(struct platform_device *dsidev,
 
 #define PIS(x) \
        seq_printf(s, "%-20s %10d %10d %10d %10d\n", #x, \
-                       stats.vc_irqs[0][ffs(DSI_VC_IRQ_##x)-1], \
-                       stats.vc_irqs[1][ffs(DSI_VC_IRQ_##x)-1], \
-                       stats.vc_irqs[2][ffs(DSI_VC_IRQ_##x)-1], \
-                       stats.vc_irqs[3][ffs(DSI_VC_IRQ_##x)-1]);
+                       stats->vc_irqs[0][ffs(DSI_VC_IRQ_##x)-1], \
+                       stats->vc_irqs[1][ffs(DSI_VC_IRQ_##x)-1], \
+                       stats->vc_irqs[2][ffs(DSI_VC_IRQ_##x)-1], \
+                       stats->vc_irqs[3][ffs(DSI_VC_IRQ_##x)-1]);
 
        seq_printf(s, "-- VC interrupts --\n");
        PIS(CS);
@@ -1594,7 +1600,7 @@ static void dsi_dump_dsidev_irqs(struct platform_device *dsidev,
 
 #define PIS(x) \
        seq_printf(s, "%-20s %10d\n", #x, \
-                       stats.cio_irqs[ffs(DSI_CIO_IRQ_##x)-1]);
+                       stats->cio_irqs[ffs(DSI_CIO_IRQ_##x)-1]);
 
        seq_printf(s, "-- CIO interrupts --\n");
        PIS(ERRSYNCESC1);
@@ -1618,6 +1624,8 @@ static void dsi_dump_dsidev_irqs(struct platform_device *dsidev,
        PIS(ULPSACTIVENOT_ALL0);
        PIS(ULPSACTIVENOT_ALL1);
 #undef PIS
+
+       kfree(stats);
 }
 
 static void dsi1_dump_irqs(struct seq_file *s)
index 8752d389e3823f5ec81ee5fc35d268b87b22e0de..d7f3e6281ce48051b74b97a047c4fabda6a322f1 100644 (file)
@@ -67,7 +67,7 @@ MODULE_PARM_DESC(video,
        "Video memory size in MB, width, height in pixels (default 2,800,600)");
 
 static void xenfb_make_preferred_console(void);
-static int xenfb_remove(struct xenbus_device *);
+static void xenfb_remove(struct xenbus_device *);
 static void xenfb_init_shared_page(struct xenfb_info *, struct fb_info *);
 static int xenfb_connect_backend(struct xenbus_device *, struct xenfb_info *);
 static void xenfb_disconnect_backend(struct xenfb_info *);
@@ -523,7 +523,7 @@ static int xenfb_resume(struct xenbus_device *dev)
        return xenfb_connect_backend(dev, info);
 }
 
-static int xenfb_remove(struct xenbus_device *dev)
+static void xenfb_remove(struct xenbus_device *dev)
 {
        struct xenfb_info *info = dev_get_drvdata(&dev->dev);
 
@@ -538,8 +538,6 @@ static int xenfb_remove(struct xenbus_device *dev)
        vfree(info->gfns);
        vfree(info->fb);
        kfree(info);
-
-       return 0;
 }
 
 static unsigned long vmalloc_to_gfn(void *address)
index 828ced060742358069ae33e46dd62899d5400760..b9a80aedee1b73fed28d7335081b95c75db73fcc 100644 (file)
@@ -15,7 +15,7 @@ static ssize_t device_show(struct device *_d,
                           struct device_attribute *attr, char *buf)
 {
        struct virtio_device *dev = dev_to_virtio(_d);
-       return sprintf(buf, "0x%04x\n", dev->id.device);
+       return sysfs_emit(buf, "0x%04x\n", dev->id.device);
 }
 static DEVICE_ATTR_RO(device);
 
@@ -23,7 +23,7 @@ static ssize_t vendor_show(struct device *_d,
                           struct device_attribute *attr, char *buf)
 {
        struct virtio_device *dev = dev_to_virtio(_d);
-       return sprintf(buf, "0x%04x\n", dev->id.vendor);
+       return sysfs_emit(buf, "0x%04x\n", dev->id.vendor);
 }
 static DEVICE_ATTR_RO(vendor);
 
@@ -31,7 +31,7 @@ static ssize_t status_show(struct device *_d,
                           struct device_attribute *attr, char *buf)
 {
        struct virtio_device *dev = dev_to_virtio(_d);
-       return sprintf(buf, "0x%08x\n", dev->config->get_status(dev));
+       return sysfs_emit(buf, "0x%08x\n", dev->config->get_status(dev));
 }
 static DEVICE_ATTR_RO(status);
 
@@ -39,7 +39,7 @@ static ssize_t modalias_show(struct device *_d,
                             struct device_attribute *attr, char *buf)
 {
        struct virtio_device *dev = dev_to_virtio(_d);
-       return sprintf(buf, "virtio:d%08Xv%08X\n",
+       return sysfs_emit(buf, "virtio:d%08Xv%08X\n",
                       dev->id.device, dev->id.vendor);
 }
 static DEVICE_ATTR_RO(modalias);
@@ -54,9 +54,9 @@ static ssize_t features_show(struct device *_d,
        /* We actually represent this as a bitstring, as it could be
         * arbitrary length in future. */
        for (i = 0; i < sizeof(dev->features)*8; i++)
-               len += sprintf(buf+len, "%c",
+               len += sysfs_emit_at(buf, len, "%c",
                               __virtio_test_bit(dev, i) ? '1' : '0');
-       len += sprintf(buf+len, "\n");
+       len += sysfs_emit_at(buf, len, "\n");
        return len;
 }
 static DEVICE_ATTR_RO(features);
index c3b9f27618497a54a13b367445542ae163961918..9e496e288cfad580c0fa0a601ea908765166a28b 100644 (file)
@@ -303,14 +303,14 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
        int err;
 
        if (index >= vp_modern_get_num_queues(mdev))
-               return ERR_PTR(-ENOENT);
+               return ERR_PTR(-EINVAL);
 
        /* Check if queue is either not available or already active. */
        num = vp_modern_get_queue_size(mdev, index);
        if (!num || vp_modern_get_queue_enable(mdev, index))
                return ERR_PTR(-ENOENT);
 
-       if (num & (num - 1)) {
+       if (!is_power_of_2(num)) {
                dev_warn(&vp_dev->pci_dev->dev, "bad queue size %u", num);
                return ERR_PTR(-EINVAL);
        }
index 2e7689bb933b8e96322d5ca8d0a0f6162e0b2eae..723c4e29e1d3b84d556161f4ef239755899f4aea 100644 (file)
@@ -1052,7 +1052,7 @@ static int vring_alloc_queue_split(struct vring_virtqueue_split *vring_split,
        dma_addr_t dma_addr;
 
        /* We assume num is a power of 2. */
-       if (num & (num - 1)) {
+       if (!is_power_of_2(num)) {
                dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
                return -EINVAL;
        }
index f2ae2e563dc54ad51fb9241450c586e13d131d71..4a2ddf730a3acd173caead38a733bda7c4e7f7be 100644 (file)
@@ -1166,6 +1166,8 @@ int w1_process(void *data)
        /* remainder if it woke up early */
        unsigned long jremain = 0;
 
+       atomic_inc(&dev->refcnt);
+
        for (;;) {
 
                if (!jremain && dev->search_count) {
@@ -1193,8 +1195,10 @@ int w1_process(void *data)
                 */
                mutex_unlock(&dev->list_mutex);
 
-               if (kthread_should_stop())
+               if (kthread_should_stop()) {
+                       __set_current_state(TASK_RUNNING);
                        break;
+               }
 
                /* Only sleep when the search is active. */
                if (dev->search_count) {
index b3e1792d9c49fce5da50cdaf67e6278f4a95454e..3a71c5eb2f837fee980adcf2d3fbcfce3346e342 100644 (file)
@@ -51,10 +51,9 @@ static struct w1_master *w1_alloc_dev(u32 id, int slave_count, int slave_ttl,
        dev->search_count       = w1_search_count;
        dev->enable_pullup      = w1_enable_pullup;
 
-       /* 1 for w1_process to decrement
-        * 1 for __w1_remove_master_device to decrement
+       /* For __w1_remove_master_device to decrement
         */
-       atomic_set(&dev->refcnt, 2);
+       atomic_set(&dev->refcnt, 1);
 
        INIT_LIST_HEAD(&dev->slist);
        INIT_LIST_HEAD(&dev->async_list);
index 28b2a1fa25ab5836586de8989431bf5661df418c..0d4f8f4f494888898c759d0f14ddc28f594cdabc 100644 (file)
@@ -1181,9 +1181,8 @@ static void pvcalls_back_changed(struct xenbus_device *dev,
        }
 }
 
-static int pvcalls_back_remove(struct xenbus_device *dev)
+static void pvcalls_back_remove(struct xenbus_device *dev)
 {
-       return 0;
 }
 
 static int pvcalls_back_uevent(struct xenbus_device *xdev,
index 1826e8e671251d6cf36fba271fdee690523dcb03..d5d589bda243d4c8c45c24e82bde513a5c750ca3 100644 (file)
@@ -225,6 +225,8 @@ again:
        return IRQ_HANDLED;
 }
 
+static void free_active_ring(struct sock_mapping *map);
+
 static void pvcalls_front_free_map(struct pvcalls_bedata *bedata,
                                   struct sock_mapping *map)
 {
@@ -240,7 +242,7 @@ static void pvcalls_front_free_map(struct pvcalls_bedata *bedata,
        for (i = 0; i < (1 << PVCALLS_RING_ORDER); i++)
                gnttab_end_foreign_access(map->active.ring->ref[i], NULL);
        gnttab_end_foreign_access(map->active.ref, NULL);
-       free_page((unsigned long)map->active.ring);
+       free_active_ring(map);
 
        kfree(map);
 }
@@ -1085,7 +1087,7 @@ static const struct xenbus_device_id pvcalls_front_ids[] = {
        { "" }
 };
 
-static int pvcalls_front_remove(struct xenbus_device *dev)
+static void pvcalls_front_remove(struct xenbus_device *dev)
 {
        struct pvcalls_bedata *bedata;
        struct sock_mapping *map = NULL, *n;
@@ -1121,7 +1123,6 @@ static int pvcalls_front_remove(struct xenbus_device *dev)
        kfree(bedata->ring.sring);
        kfree(bedata);
        xenbus_switch_state(dev, XenbusStateClosed);
-       return 0;
 }
 
 static int pvcalls_front_probe(struct xenbus_device *dev,
index d171091eec123dda417e3b2b32c5aa70184826d3..b11e401f1b1ee92f753b86ad8532f6910aef4da0 100644 (file)
@@ -716,14 +716,12 @@ out:
        return err;
 }
 
-static int xen_pcibk_xenbus_remove(struct xenbus_device *dev)
+static void xen_pcibk_xenbus_remove(struct xenbus_device *dev)
 {
        struct xen_pcibk_device *pdev = dev_get_drvdata(&dev->dev);
 
        if (pdev != NULL)
                free_pdev(pdev);
-
-       return 0;
 }
 
 static const struct xenbus_device_id xen_pcibk_ids[] = {
index 6106ed93817d670be7001dec34146a3bdb74f013..954188b0b858a26e1cdf2195cddaecff6e8bbe5f 100644 (file)
@@ -1249,7 +1249,7 @@ static void scsiback_release_translation_entry(struct vscsibk_info *info)
        spin_unlock_irqrestore(&info->v2p_lock, flags);
 }
 
-static int scsiback_remove(struct xenbus_device *dev)
+static void scsiback_remove(struct xenbus_device *dev)
 {
        struct vscsibk_info *info = dev_get_drvdata(&dev->dev);
 
@@ -1261,8 +1261,6 @@ static int scsiback_remove(struct xenbus_device *dev)
        gnttab_page_cache_shrink(&info->free_pages, 0);
 
        dev_set_drvdata(&dev->dev, NULL);
-
-       return 0;
 }
 
 static int scsiback_probe(struct xenbus_device *dev,
index cefa222f7881c77560d51844ec3a8b7a6231462a..8daeed31e1af949a01beba514664f21725fdebda 100644 (file)
@@ -880,7 +880,7 @@ affs_truncate(struct inode *inode)
        if (inode->i_size > AFFS_I(inode)->mmu_private) {
                struct address_space *mapping = inode->i_mapping;
                struct page *page;
-               void *fsdata;
+               void *fsdata = NULL;
                loff_t isize = inode->i_size;
                int res;
 
index 7dcd59693a0c2fc62f9cf9af0b5fdc5521a2d8ba..d4ddb20d673208f7fb082c2deee34e8debd27ceb 100644 (file)
@@ -13,6 +13,8 @@
 #include "internal.h"
 #include "afs_cm.h"
 #include "protocol_yfs.h"
+#define RXRPC_TRACE_ONLY_DEFINE_ENUMS
+#include <trace/events/rxrpc.h>
 
 static int afs_deliver_cb_init_call_back_state(struct afs_call *);
 static int afs_deliver_cb_init_call_back_state3(struct afs_call *);
@@ -191,7 +193,7 @@ static void afs_cm_destructor(struct afs_call *call)
  * Abort a service call from within an action function.
  */
 static void afs_abort_service_call(struct afs_call *call, u32 abort_code, int error,
-                                  const char *why)
+                                  enum rxrpc_abort_reason why)
 {
        rxrpc_kernel_abort_call(call->net->socket, call->rxcall,
                                abort_code, error, why);
@@ -469,7 +471,7 @@ static void SRXAFSCB_ProbeUuid(struct work_struct *work)
        if (memcmp(r, &call->net->uuid, sizeof(call->net->uuid)) == 0)
                afs_send_empty_reply(call);
        else
-               afs_abort_service_call(call, 1, 1, "K-1");
+               afs_abort_service_call(call, 1, 1, afs_abort_probeuuid_negative);
 
        afs_put_call(call);
        _leave("");
index c62939e5ea1f0b9343f42866f194f9337d818e08..7817e2b860e5eb30a2c758d78a737615993bdd85 100644 (file)
@@ -13,6 +13,8 @@
 #include "internal.h"
 #include "afs_cm.h"
 #include "protocol_yfs.h"
+#define RXRPC_TRACE_ONLY_DEFINE_ENUMS
+#include <trace/events/rxrpc.h>
 
 struct workqueue_struct *afs_async_calls;
 
@@ -397,7 +399,8 @@ void afs_make_call(struct afs_addr_cursor *ac, struct afs_call *call, gfp_t gfp)
 error_do_abort:
        if (ret != -ECONNABORTED) {
                rxrpc_kernel_abort_call(call->net->socket, rxcall,
-                                       RX_USER_ABORT, ret, "KSD");
+                                       RX_USER_ABORT, ret,
+                                       afs_abort_send_data_error);
        } else {
                len = 0;
                iov_iter_kvec(&msg.msg_iter, ITER_DEST, NULL, 0, 0);
@@ -527,7 +530,8 @@ static void afs_deliver_to_call(struct afs_call *call)
                case -ENOTSUPP:
                        abort_code = RXGEN_OPCODE;
                        rxrpc_kernel_abort_call(call->net->socket, call->rxcall,
-                                               abort_code, ret, "KIV");
+                                               abort_code, ret,
+                                               afs_abort_op_not_supported);
                        goto local_abort;
                case -EIO:
                        pr_err("kAFS: Call %u in bad state %u\n",
@@ -542,12 +546,14 @@ static void afs_deliver_to_call(struct afs_call *call)
                        if (state != AFS_CALL_CL_AWAIT_REPLY)
                                abort_code = RXGEN_SS_UNMARSHAL;
                        rxrpc_kernel_abort_call(call->net->socket, call->rxcall,
-                                               abort_code, ret, "KUM");
+                                               abort_code, ret,
+                                               afs_abort_unmarshal_error);
                        goto local_abort;
                default:
                        abort_code = RX_CALL_DEAD;
                        rxrpc_kernel_abort_call(call->net->socket, call->rxcall,
-                                               abort_code, ret, "KER");
+                                               abort_code, ret,
+                                               afs_abort_general_error);
                        goto local_abort;
                }
        }
@@ -619,7 +625,8 @@ long afs_wait_for_call_to_complete(struct afs_call *call,
                        /* Kill off the call if it's still live. */
                        _debug("call interrupted");
                        if (rxrpc_kernel_abort_call(call->net->socket, call->rxcall,
-                                                   RX_USER_ABORT, -EINTR, "KWI"))
+                                                   RX_USER_ABORT, -EINTR,
+                                                   afs_abort_interrupted))
                                afs_set_call_complete(call, -EINTR, 0);
                }
        }
@@ -836,7 +843,8 @@ void afs_send_empty_reply(struct afs_call *call)
        case -ENOMEM:
                _debug("oom");
                rxrpc_kernel_abort_call(net->socket, call->rxcall,
-                                       RXGEN_SS_MARSHAL, -ENOMEM, "KOO");
+                                       RXGEN_SS_MARSHAL, -ENOMEM,
+                                       afs_abort_oom);
                fallthrough;
        default:
                _leave(" [error]");
@@ -878,7 +886,8 @@ void afs_send_simple_reply(struct afs_call *call, const void *buf, size_t len)
        if (n == -ENOMEM) {
                _debug("oom");
                rxrpc_kernel_abort_call(net->socket, call->rxcall,
-                                       RXGEN_SS_MARSHAL, -ENOMEM, "KOO");
+                                       RXGEN_SS_MARSHAL, -ENOMEM,
+                                       afs_abort_oom);
        }
        _leave(" [error]");
 }
@@ -900,6 +909,7 @@ int afs_extract_data(struct afs_call *call, bool want_more)
        ret = rxrpc_kernel_recv_data(net->socket, call->rxcall, iter,
                                     &call->iov_len, want_more, &remote_abort,
                                     &call->service_id);
+       trace_afs_receive_data(call, call->iter, want_more, ret);
        if (ret == 0 || ret == -EAGAIN)
                return ret;
 
index de63572a94044ac954b76b96bd0cc669e45d3075..9a780fafc5397ec1c1560c9dcfe2edd21b8250b4 100644 (file)
@@ -2034,7 +2034,7 @@ static int elf_core_dump(struct coredump_params *cprm)
         * The number of segs are recored into ELF header as 16bit value.
         * Please check DEFAULT_MAX_MAP_COUNT definition when you modify here.
         */
-       segs = cprm->vma_count + elf_core_extra_phdrs();
+       segs = cprm->vma_count + elf_core_extra_phdrs(cprm);
 
        /* for notes section */
        segs++;
@@ -2074,7 +2074,7 @@ static int elf_core_dump(struct coredump_params *cprm)
        dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
 
        offset += cprm->vma_data_size;
-       offset += elf_core_extra_data_size();
+       offset += elf_core_extra_data_size(cprm);
        e_shoff = offset;
 
        if (e_phnum == PN_XNUM) {
index 096e3520a0b10659a7c113e0338b40ebc67bfeff..a05eafcacfb271a1f30b633b0b922ec21e178043 100644 (file)
@@ -1509,7 +1509,7 @@ static int elf_fdpic_core_dump(struct coredump_params *cprm)
        tmp->next = thread_list;
        thread_list = tmp;
 
-       segs = cprm->vma_count + elf_core_extra_phdrs();
+       segs = cprm->vma_count + elf_core_extra_phdrs(cprm);
 
        /* for notes section */
        segs++;
@@ -1555,7 +1555,7 @@ static int elf_fdpic_core_dump(struct coredump_params *cprm)
        dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
 
        offset += cprm->vma_data_size;
-       offset += elf_core_extra_data_size();
+       offset += elf_core_extra_data_size(cprm);
        e_shoff = offset;
 
        if (e_phnum == PN_XNUM) {
index 21c92c74bf71a4742a69902d8037531fb7b86b31..46851511b661b38810bde878648d939aeeeb9331 100644 (file)
@@ -484,6 +484,7 @@ static int add_all_parents(struct btrfs_backref_walk_ctx *ctx,
        u64 wanted_disk_byte = ref->wanted_disk_byte;
        u64 count = 0;
        u64 data_offset;
+       u8 type;
 
        if (level != 0) {
                eb = path->nodes[level];
@@ -538,6 +539,9 @@ static int add_all_parents(struct btrfs_backref_walk_ctx *ctx,
                        continue;
                }
                fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
+               type = btrfs_file_extent_type(eb, fi);
+               if (type == BTRFS_FILE_EXTENT_INLINE)
+                       goto next;
                disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
                data_offset = btrfs_file_extent_offset(eb, fi);
 
index b8fb7ef6b520600994a33e723d1c44993df13e24..8affc88b0e0a4b68ed61e08fd1552331fd9c8517 100644 (file)
@@ -329,7 +329,16 @@ int btrfs_repair_io_failure(struct btrfs_fs_info *fs_info, u64 ino, u64 start,
                                      &map_length, &bioc, mirror_num);
                if (ret)
                        goto out_counter_dec;
-               BUG_ON(mirror_num != bioc->mirror_num);
+               /*
+                * This happens when dev-replace is also running, and the
+                * mirror_num indicates the dev-replace target.
+                *
+                * In this case, we don't need to do anything, as the read
+                * error just means the replace progress hasn't reached our
+                * read range, and later replace routine would handle it well.
+                */
+               if (mirror_num != bioc->mirror_num)
+                       goto out_counter_dec;
        }
 
        sector = bioc->stripes[bioc->mirror_num - 1].physical >> 9;
index 0a3c261b69c9f95ec56c1623bcc7f695bb0bdc54..d81b764a76446ba2b083995c7a40b1c8c0d6be5c 100644 (file)
@@ -358,8 +358,10 @@ int btrfs_defrag_leaves(struct btrfs_trans_handle *trans,
                goto out;
 
        path = btrfs_alloc_path();
-       if (!path)
-               return -ENOMEM;
+       if (!path) {
+               ret = -ENOMEM;
+               goto out;
+       }
 
        level = btrfs_header_level(root->node);
 
index 0888d484df80c93245e9d77cccabd3e093e0fc00..3aa04224315eb383f3b61d8a278b02e68e01bb56 100644 (file)
@@ -367,7 +367,14 @@ error:
        btrfs_print_tree(eb, 0);
        btrfs_err(fs_info, "block=%llu write time tree block corruption detected",
                  eb->start);
-       WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG));
+       /*
+        * Be noisy if this is an extent buffer from a log tree. We don't abort
+        * a transaction in case there's a bad log tree extent buffer, we just
+        * fallback to a transaction commit. Still we want to know when there is
+        * a bad log tree extent buffer, as that may signal a bug somewhere.
+        */
+       WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG) ||
+               btrfs_header_owner(eb) == BTRFS_TREE_LOG_OBJECTID);
        return ret;
 }
 
@@ -530,6 +537,9 @@ static int validate_extent_buffer(struct extent_buffer *eb,
        }
 
        if (found_level != check->level) {
+               btrfs_err(fs_info,
+               "level verify failed on logical %llu mirror %u wanted %u found %u",
+                         eb->start, eb->read_mirror, check->level, found_level);
                ret = -EIO;
                goto out;
        }
@@ -3381,6 +3391,8 @@ out:
 /*
  * Do various sanity and dependency checks of different features.
  *
+ * @is_rw_mount:       If the mount is read-write.
+ *
  * This is the place for less strict checks (like for subpage or artificial
  * feature dependencies).
  *
@@ -3391,7 +3403,7 @@ out:
  * (space cache related) can modify on-disk format like free space tree and
  * screw up certain feature dependencies.
  */
-int btrfs_check_features(struct btrfs_fs_info *fs_info, struct super_block *sb)
+int btrfs_check_features(struct btrfs_fs_info *fs_info, bool is_rw_mount)
 {
        struct btrfs_super_block *disk_super = fs_info->super_copy;
        u64 incompat = btrfs_super_incompat_flags(disk_super);
@@ -3430,7 +3442,7 @@ int btrfs_check_features(struct btrfs_fs_info *fs_info, struct super_block *sb)
        if (btrfs_super_nodesize(disk_super) > PAGE_SIZE)
                incompat |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA;
 
-       if (compat_ro_unsupp && !sb_rdonly(sb)) {
+       if (compat_ro_unsupp && is_rw_mount) {
                btrfs_err(fs_info,
        "cannot mount read-write because of unknown compat_ro features (0x%llx)",
                       compat_ro);
@@ -3633,7 +3645,7 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
                goto fail_alloc;
        }
 
-       ret = btrfs_check_features(fs_info, sb);
+       ret = btrfs_check_features(fs_info, !sb_rdonly(sb));
        if (ret < 0) {
                err = ret;
                goto fail_alloc;
index 363935cfc084464bcb3bb7386b5eea54e511f307..f2f295eb6103da3dd00e1d30c086a910c41747a5 100644 (file)
@@ -50,7 +50,7 @@ int __cold open_ctree(struct super_block *sb,
 void __cold close_ctree(struct btrfs_fs_info *fs_info);
 int btrfs_validate_super(struct btrfs_fs_info *fs_info,
                         struct btrfs_super_block *sb, int mirror_num);
-int btrfs_check_features(struct btrfs_fs_info *fs_info, struct super_block *sb);
+int btrfs_check_features(struct btrfs_fs_info *fs_info, bool is_rw_mount);
 int write_all_supers(struct btrfs_fs_info *fs_info, int max_mirrors);
 struct btrfs_super_block *btrfs_read_dev_super(struct block_device *bdev);
 struct btrfs_super_block *btrfs_read_dev_one_super(struct block_device *bdev,
index 9ae9cd1e70352a615de5cfcf07a9353632c931d6..3c7766dfaa694a45fdc82bf8d6704318813b0110 100644 (file)
@@ -1551,7 +1551,7 @@ u64 count_range_bits(struct extent_io_tree *tree,
        u64 last = 0;
        int found = 0;
 
-       if (WARN_ON(search_end <= cur_start))
+       if (WARN_ON(search_end < cur_start))
                return 0;
 
        spin_lock(&tree->lock);
index 892d78c1853c763878ac674634ba99c46677f1ea..72ba13b027a9e6b971bfd0446043815154f854fa 100644 (file)
@@ -1713,6 +1713,11 @@ static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
                BUG();
        if (ret && insert_reserved)
                btrfs_pin_extent(trans, node->bytenr, node->num_bytes, 1);
+       if (ret < 0)
+               btrfs_err(trans->fs_info,
+"failed to run delayed ref for logical %llu num_bytes %llu type %u action %u ref_mod %d: %d",
+                         node->bytenr, node->num_bytes, node->type,
+                         node->action, node->ref_mod, ret);
        return ret;
 }
 
@@ -1954,8 +1959,6 @@ static int btrfs_run_delayed_refs_for_head(struct btrfs_trans_handle *trans,
                if (ret) {
                        unselect_delayed_ref_head(delayed_refs, locked_ref);
                        btrfs_put_delayed_ref(ref);
-                       btrfs_debug(fs_info, "run_one_delayed_ref returned %d",
-                                   ret);
                        return ret;
                }
 
index 83dd3aa5966357bafca78ca9aeb31a95968b31be..9bd32daa9b9a6fc5a316003a65b7c71d3b236ea6 100644 (file)
@@ -103,6 +103,15 @@ struct btrfs_bio_ctrl {
        u32 len_to_oe_boundary;
        btrfs_bio_end_io_t end_io_func;
 
+       /*
+        * This is for metadata read, to provide the extra needed verification
+        * info.  This has to be provided for submit_one_bio(), as
+        * submit_one_bio() can submit a bio if it ends at stripe boundary.  If
+        * no such parent_check is provided, the metadata can hit false alert at
+        * endio time.
+        */
+       struct btrfs_tree_parent_check *parent_check;
+
        /*
         * Tell writepage not to lock the state bits for this range, it still
         * does the unlocking.
@@ -133,13 +142,24 @@ static void submit_one_bio(struct btrfs_bio_ctrl *bio_ctrl)
 
        btrfs_bio(bio)->file_offset = page_offset(bv->bv_page) + bv->bv_offset;
 
-       if (!is_data_inode(&inode->vfs_inode))
+       if (!is_data_inode(&inode->vfs_inode)) {
+               if (btrfs_op(bio) != BTRFS_MAP_WRITE) {
+                       /*
+                        * For metadata read, we should have the parent_check,
+                        * and copy it to bbio for metadata verification.
+                        */
+                       ASSERT(bio_ctrl->parent_check);
+                       memcpy(&btrfs_bio(bio)->parent_check,
+                              bio_ctrl->parent_check,
+                              sizeof(struct btrfs_tree_parent_check));
+               }
                btrfs_submit_metadata_bio(inode, bio, mirror_num);
-       else if (btrfs_op(bio) == BTRFS_MAP_WRITE)
+       } else if (btrfs_op(bio) == BTRFS_MAP_WRITE) {
                btrfs_submit_data_write_bio(inode, bio, mirror_num);
-       else
+       } else {
                btrfs_submit_data_read_bio(inode, bio, mirror_num,
                                           bio_ctrl->compress_type);
+       }
 
        /* The bio is owned by the end_io handler now */
        bio_ctrl->bio = NULL;
@@ -4829,6 +4849,7 @@ static int read_extent_buffer_subpage(struct extent_buffer *eb, int wait,
        struct extent_state *cached_state = NULL;
        struct btrfs_bio_ctrl bio_ctrl = {
                .mirror_num = mirror_num,
+               .parent_check = check,
        };
        int ret = 0;
 
@@ -4878,7 +4899,6 @@ static int read_extent_buffer_subpage(struct extent_buffer *eb, int wait,
                 */
                atomic_dec(&eb->io_pages);
        }
-       memcpy(&btrfs_bio(bio_ctrl.bio)->parent_check, check, sizeof(*check));
        submit_one_bio(&bio_ctrl);
        if (ret || wait != WAIT_COMPLETE) {
                free_extent_state(cached_state);
@@ -4905,6 +4925,7 @@ int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num,
        unsigned long num_reads = 0;
        struct btrfs_bio_ctrl bio_ctrl = {
                .mirror_num = mirror_num,
+               .parent_check = check,
        };
 
        if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
@@ -4996,7 +5017,6 @@ int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num,
                }
        }
 
-       memcpy(&btrfs_bio(bio_ctrl.bio)->parent_check, check, sizeof(*check));
        submit_one_bio(&bio_ctrl);
 
        if (ret || wait != WAIT_COMPLETE)
index 91b00eb2440e7d098bcf68e6bceed22c03e0d53c..af046d22300e2ccdcbce0984383a4e5e0a7feef4 100644 (file)
@@ -3354,7 +3354,7 @@ bool btrfs_find_delalloc_in_range(struct btrfs_inode *inode, u64 start, u64 end,
        bool search_io_tree = true;
        bool ret = false;
 
-       while (cur_offset < end) {
+       while (cur_offset <= end) {
                u64 delalloc_start;
                u64 delalloc_end;
                bool delalloc;
@@ -3541,6 +3541,7 @@ static loff_t find_desired_extent(struct file *file, loff_t offset, int whence)
                struct extent_buffer *leaf = path->nodes[0];
                struct btrfs_file_extent_item *extent;
                u64 extent_end;
+               u8 type;
 
                if (path->slots[0] >= btrfs_header_nritems(leaf)) {
                        ret = btrfs_next_leaf(root, path);
@@ -3596,10 +3597,16 @@ static loff_t find_desired_extent(struct file *file, loff_t offset, int whence)
 
                extent = btrfs_item_ptr(leaf, path->slots[0],
                                        struct btrfs_file_extent_item);
+               type = btrfs_file_extent_type(leaf, extent);
 
-               if (btrfs_file_extent_disk_bytenr(leaf, extent) == 0 ||
-                   btrfs_file_extent_type(leaf, extent) ==
-                   BTRFS_FILE_EXTENT_PREALLOC) {
+               /*
+                * Can't access the extent's disk_bytenr field if this is an
+                * inline extent, since at that offset, it's where the extent
+                * data starts.
+                */
+               if (type == BTRFS_FILE_EXTENT_PREALLOC ||
+                   (type == BTRFS_FILE_EXTENT_REG &&
+                    btrfs_file_extent_disk_bytenr(leaf, extent) == 0)) {
                        /*
                         * Explicit hole or prealloc extent, search for delalloc.
                         * A prealloc extent is treated like a hole.
index a749367e5ae2a2b9090a44ec8c0dfc8536f92131..37b86acfcbcf880ded4a2fbed2407a0e6f905982 100644 (file)
@@ -119,6 +119,12 @@ enum {
        /* Indicate that we want to commit the transaction. */
        BTRFS_FS_NEED_TRANS_COMMIT,
 
+       /*
+        * Indicate metadata over-commit is disabled. This is set when active
+        * zone tracking is needed.
+        */
+       BTRFS_FS_NO_OVERCOMMIT,
+
 #if BITS_PER_LONG == 32
        /* Indicate if we have error/warn message printed on 32bit systems */
        BTRFS_FS_32BIT_ERROR,
index 8bcad994015487e9a049c7f0d99b1f685176afa8..98a800b8bd438b53e1ef21647bd6ebe95d385256 100644 (file)
@@ -7092,7 +7092,7 @@ next:
                 * Other members are not utilized for inline extents.
                 */
                ASSERT(em->block_start == EXTENT_MAP_INLINE);
-               ASSERT(em->len = fs_info->sectorsize);
+               ASSERT(em->len == fs_info->sectorsize);
 
                ret = read_inline_extent(inode, path, page);
                if (ret < 0)
@@ -9377,8 +9377,10 @@ static int btrfs_rename(struct user_namespace *mnt_userns,
 
        if (flags & RENAME_WHITEOUT) {
                whiteout_args.inode = new_whiteout_inode(mnt_userns, old_dir);
-               if (!whiteout_args.inode)
-                       return -ENOMEM;
+               if (!whiteout_args.inode) {
+                       ret = -ENOMEM;
+                       goto out_fscrypt_names;
+               }
                ret = btrfs_new_inode_prepare(&whiteout_args, &trans_num_items);
                if (ret)
                        goto out_whiteout_inode;
index 5c636e00d77da395a3dbccecadb17b4a02d3879a..af97413abcf43bd7446d79d1e5a5e2a8e9d57872 100644 (file)
@@ -2765,9 +2765,19 @@ int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans)
 
                        /*
                         * Old roots should be searched when inserting qgroup
-                        * extent record
+                        * extent record.
+                        *
+                        * But for INCONSISTENT (NO_ACCOUNTING) -> rescan case,
+                        * we may have some record inserted during
+                        * NO_ACCOUNTING (thus no old_roots populated), but
+                        * later we start rescan, which clears NO_ACCOUNTING,
+                        * leaving some inserted records without old_roots
+                        * populated.
+                        *
+                        * Those cases are rare and should not cause too much
+                        * time spent during commit_transaction().
                         */
-                       if (WARN_ON(!record->old_roots)) {
+                       if (!record->old_roots) {
                                /* Search commit root to find old_roots */
                                ret = btrfs_find_all_roots(&ctx, false);
                                if (ret < 0)
@@ -2787,6 +2797,7 @@ int btrfs_qgroup_account_extents(struct btrfs_trans_handle *trans)
                         * current root. It's safe inside commit_transaction().
                         */
                        ctx.trans = trans;
+                       ctx.time_seq = BTRFS_SEQ_LAST;
                        ret = btrfs_find_all_roots(&ctx, false);
                        if (ret < 0)
                                goto cleanup;
@@ -3356,6 +3367,7 @@ static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
        int err = -ENOMEM;
        int ret = 0;
        bool stopped = false;
+       bool did_leaf_rescans = false;
 
        path = btrfs_alloc_path();
        if (!path)
@@ -3376,6 +3388,7 @@ static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
                }
 
                err = qgroup_rescan_leaf(trans, path);
+               did_leaf_rescans = true;
 
                if (err > 0)
                        btrfs_commit_transaction(trans);
@@ -3396,16 +3409,23 @@ out:
        mutex_unlock(&fs_info->qgroup_rescan_lock);
 
        /*
-        * only update status, since the previous part has already updated the
-        * qgroup info.
+        * Only update status, since the previous part has already updated the
+        * qgroup info, and only if we did any actual work. This also prevents
+        * race with a concurrent quota disable, which has already set
+        * fs_info->quota_root to NULL and cleared BTRFS_FS_QUOTA_ENABLED at
+        * btrfs_quota_disable().
         */
-       trans = btrfs_start_transaction(fs_info->quota_root, 1);
-       if (IS_ERR(trans)) {
-               err = PTR_ERR(trans);
+       if (did_leaf_rescans) {
+               trans = btrfs_start_transaction(fs_info->quota_root, 1);
+               if (IS_ERR(trans)) {
+                       err = PTR_ERR(trans);
+                       trans = NULL;
+                       btrfs_err(fs_info,
+                                 "fail to start transaction for status update: %d",
+                                 err);
+               }
+       } else {
                trans = NULL;
-               btrfs_err(fs_info,
-                         "fail to start transaction for status update: %d",
-                         err);
        }
 
        mutex_lock(&fs_info->qgroup_rescan_lock);
index 2d90a6b5eb00e319c36aed30d330cae6a622ceb2..6a2cf754912df27a9573aeb5f11e8bc85d217aa0 100644 (file)
@@ -2646,7 +2646,7 @@ static int recover_scrub_rbio(struct btrfs_raid_bio *rbio)
        void **pointers = NULL;
        void **unmap_array = NULL;
        int sector_nr;
-       int ret;
+       int ret = 0;
 
        /*
         * @pointers array stores the pointer for each sector.
index d28ee4e36f3d907e7ce9f8354e56a9f1810be2fc..69c09508afb506ac8121caeae39fb2e7071362c0 100644 (file)
@@ -407,7 +407,8 @@ int btrfs_can_overcommit(struct btrfs_fs_info *fs_info,
                return 0;
 
        used = btrfs_space_info_used(space_info, true);
-       if (btrfs_is_zoned(fs_info) && (space_info->flags & BTRFS_BLOCK_GROUP_METADATA))
+       if (test_bit(BTRFS_FS_NO_OVERCOMMIT, &fs_info->flags) &&
+           (space_info->flags & BTRFS_BLOCK_GROUP_METADATA))
                avail = 0;
        else
                avail = calc_available_free_space(fs_info, space_info, flush);
index 93f52ee85f6fe9735f379ff9fae3169eba042eb6..433ce221dc5c79a1c2b8d1a32a09473ae26a7205 100644 (file)
@@ -1705,7 +1705,7 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
        if (ret)
                goto restore;
 
-       ret = btrfs_check_features(fs_info, sb);
+       ret = btrfs_check_features(fs_info, !(*flags & SB_RDONLY));
        if (ret < 0)
                goto restore;
 
@@ -2514,6 +2514,7 @@ static __always_inline void btrfs_exit_btrfs_fs(void)
 static void __exit exit_btrfs_fs(void)
 {
        btrfs_exit_btrfs_fs();
+       btrfs_cleanup_fs_uuids();
 }
 
 static int __init init_btrfs_fs(void)
index a3c43f0b1c95c0aafbc61cccf963aad8850f6dc4..d43261545264e3a33a2050669cf7a4a8d90d250a 100644 (file)
@@ -2980,7 +2980,6 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
                ret = 0;
        if (ret) {
                blk_finish_plug(&plug);
-               btrfs_abort_transaction(trans, ret);
                btrfs_set_log_full_commit(trans);
                mutex_unlock(&root->log_mutex);
                goto out;
@@ -3045,15 +3044,12 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
 
                blk_finish_plug(&plug);
                btrfs_set_log_full_commit(trans);
-
-               if (ret != -ENOSPC) {
-                       btrfs_abort_transaction(trans, ret);
-                       mutex_unlock(&log_root_tree->log_mutex);
-                       goto out;
-               }
+               if (ret != -ENOSPC)
+                       btrfs_err(fs_info,
+                                 "failed to update log for root %llu ret %d",
+                                 root->root_key.objectid, ret);
                btrfs_wait_tree_log_extents(log, mark);
                mutex_unlock(&log_root_tree->log_mutex);
-               ret = BTRFS_LOG_FORCE_COMMIT;
                goto out;
        }
 
@@ -3112,7 +3108,6 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
                goto out_wake_log_root;
        } else if (ret) {
                btrfs_set_log_full_commit(trans);
-               btrfs_abort_transaction(trans, ret);
                mutex_unlock(&log_root_tree->log_mutex);
                goto out_wake_log_root;
        }
@@ -3826,7 +3821,10 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans,
                                              path->slots[0]);
                        if (tmp.type == BTRFS_DIR_INDEX_KEY)
                                last_old_dentry_offset = tmp.offset;
+               } else if (ret < 0) {
+                       err = ret;
                }
+
                goto done;
        }
 
@@ -3846,19 +3844,34 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans,
                 */
                if (tmp.type == BTRFS_DIR_INDEX_KEY)
                        last_old_dentry_offset = tmp.offset;
+       } else if (ret < 0) {
+               err = ret;
+               goto done;
        }
+
        btrfs_release_path(path);
 
        /*
-        * Find the first key from this transaction again.  See the note for
-        * log_new_dir_dentries, if we're logging a directory recursively we
-        * won't be holding its i_mutex, which means we can modify the directory
-        * while we're logging it.  If we remove an entry between our first
-        * search and this search we'll not find the key again and can just
-        * bail.
+        * Find the first key from this transaction again or the one we were at
+        * in the loop below in case we had to reschedule. We may be logging the
+        * directory without holding its VFS lock, which happens when logging new
+        * dentries (through log_new_dir_dentries()) or in some cases when we
+        * need to log the parent directory of an inode. This means a dir index
+        * key might be deleted from the inode's root, and therefore we may not
+        * find it anymore. If we can't find it, just move to the next key. We
+        * can not bail out and ignore, because if we do that we will simply
+        * not log dir index keys that come after the one that was just deleted
+        * and we can end up logging a dir index range that ends at (u64)-1
+        * (@last_offset is initialized to that), resulting in removing dir
+        * entries we should not remove at log replay time.
         */
 search:
        ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
+       if (ret > 0)
+               ret = btrfs_next_item(root, path);
+       if (ret < 0)
+               err = ret;
+       /* If ret is 1, there are no more keys in the inode's root. */
        if (ret != 0)
                goto done;
 
@@ -5580,8 +5593,10 @@ static int add_conflicting_inode(struct btrfs_trans_handle *trans,
         * LOG_INODE_EXISTS mode) and slow down other fsyncs or transaction
         * commits.
         */
-       if (ctx->num_conflict_inodes >= MAX_CONFLICT_INODES)
+       if (ctx->num_conflict_inodes >= MAX_CONFLICT_INODES) {
+               btrfs_set_log_full_commit(trans);
                return BTRFS_LOG_FORCE_COMMIT;
+       }
 
        inode = btrfs_iget(root->fs_info->sb, ino, root);
        /*
@@ -7459,8 +7474,11 @@ void btrfs_log_new_name(struct btrfs_trans_handle *trans,
                 * not fail, but if it does, it's not serious, just bail out and
                 * mark the log for a full commit.
                 */
-               if (WARN_ON_ONCE(ret < 0))
+               if (WARN_ON_ONCE(ret < 0)) {
+                       fscrypt_free_filename(&fname);
                        goto out;
+               }
+
                log_pinned = true;
 
                path = btrfs_alloc_path();
index aa25fa335d3ed16d9133a4ce4c535830f994248c..bcfef75b97da0de6315e0ab76da7c03e272a5a79 100644 (file)
@@ -768,8 +768,11 @@ static noinline struct btrfs_device *device_list_add(const char *path,
                                        BTRFS_SUPER_FLAG_CHANGING_FSID_V2);
 
        error = lookup_bdev(path, &path_devt);
-       if (error)
+       if (error) {
+               btrfs_err(NULL, "failed to lookup block device for path %s: %d",
+                         path, error);
                return ERR_PTR(error);
+       }
 
        if (fsid_change_in_progress) {
                if (!has_metadata_uuid)
@@ -836,6 +839,9 @@ static noinline struct btrfs_device *device_list_add(const char *path,
                unsigned int nofs_flag;
 
                if (fs_devices->opened) {
+                       btrfs_err(NULL,
+               "device %s belongs to fsid %pU, and the fs is already mounted",
+                                 path, fs_devices->fsid);
                        mutex_unlock(&fs_devices->device_list_mutex);
                        return ERR_PTR(-EBUSY);
                }
@@ -905,6 +911,9 @@ static noinline struct btrfs_device *device_list_add(const char *path,
                         * generation are equal.
                         */
                        mutex_unlock(&fs_devices->device_list_mutex);
+                       btrfs_err(NULL,
+"device %s already registered with a higher generation, found %llu expect %llu",
+                                 path, found_transid, device->generation);
                        return ERR_PTR(-EEXIST);
                }
 
@@ -2005,42 +2014,42 @@ static u64 btrfs_num_devices(struct btrfs_fs_info *fs_info)
        return num_devices;
 }
 
+static void btrfs_scratch_superblock(struct btrfs_fs_info *fs_info,
+                                    struct block_device *bdev, int copy_num)
+{
+       struct btrfs_super_block *disk_super;
+       const size_t len = sizeof(disk_super->magic);
+       const u64 bytenr = btrfs_sb_offset(copy_num);
+       int ret;
+
+       disk_super = btrfs_read_disk_super(bdev, bytenr, bytenr);
+       if (IS_ERR(disk_super))
+               return;
+
+       memset(&disk_super->magic, 0, len);
+       folio_mark_dirty(virt_to_folio(disk_super));
+       btrfs_release_disk_super(disk_super);
+
+       ret = sync_blockdev_range(bdev, bytenr, bytenr + len - 1);
+       if (ret)
+               btrfs_warn(fs_info, "error clearing superblock number %d (%d)",
+                       copy_num, ret);
+}
+
 void btrfs_scratch_superblocks(struct btrfs_fs_info *fs_info,
                               struct block_device *bdev,
                               const char *device_path)
 {
-       struct btrfs_super_block *disk_super;
        int copy_num;
 
        if (!bdev)
                return;
 
        for (copy_num = 0; copy_num < BTRFS_SUPER_MIRROR_MAX; copy_num++) {
-               struct page *page;
-               int ret;
-
-               disk_super = btrfs_read_dev_one_super(bdev, copy_num, false);
-               if (IS_ERR(disk_super))
-                       continue;
-
-               if (bdev_is_zoned(bdev)) {
+               if (bdev_is_zoned(bdev))
                        btrfs_reset_sb_log_zones(bdev, copy_num);
-                       continue;
-               }
-
-               memset(&disk_super->magic, 0, sizeof(disk_super->magic));
-
-               page = virt_to_page(disk_super);
-               set_page_dirty(page);
-               lock_page(page);
-               /* write_on_page() unlocks the page */
-               ret = write_one_page(page);
-               if (ret)
-                       btrfs_warn(fs_info,
-                               "error clearing superblock number %d (%d)",
-                               copy_num, ret);
-               btrfs_release_disk_super(disk_super);
-
+               else
+                       btrfs_scratch_superblock(fs_info, bdev, copy_num);
        }
 
        /* Notify udev that device has changed */
index a759668477bb2e10eee5f6b60fb03463cd81fcf8..1f503e8e42d483a765b71b9b5910fce0ede0ea6f 100644 (file)
@@ -539,6 +539,8 @@ int btrfs_get_dev_zone_info(struct btrfs_device *device, bool populate_cache)
                }
                atomic_set(&zone_info->active_zones_left,
                           max_active_zones - nactive);
+               /* Overcommit does not work well with active zone tracking. */
+               set_bit(BTRFS_FS_NO_OVERCOMMIT, &fs_info->flags);
        }
 
        /* Validate superblock log */
index 4b159f97fe7b522974a1b1a2780f7fb3beefbbea..f75ad432f375f882abd478bd1fd585568c705721 100644 (file)
@@ -2913,7 +2913,7 @@ int ceph_get_caps(struct file *filp, int need, int want, loff_t endoff, int *got
 
        while (true) {
                flags &= CEPH_FILE_MODE_MASK;
-               if (atomic_read(&fi->num_locks))
+               if (vfs_inode_has_locks(inode))
                        flags |= CHECK_FILELOCK;
                _got = 0;
                ret = try_get_cap_refs(inode, need, want, endoff,
index f3b461c708a8bea40554813ddc7287ba6bd7f428..9c8dc8a55e7e13e05fed766ea8505aa6ae9c6b62 100644 (file)
@@ -32,24 +32,36 @@ void __init ceph_flock_init(void)
 
 static void ceph_fl_copy_lock(struct file_lock *dst, struct file_lock *src)
 {
-       struct ceph_file_info *fi = dst->fl_file->private_data;
        struct inode *inode = file_inode(dst->fl_file);
        atomic_inc(&ceph_inode(inode)->i_filelock_ref);
-       atomic_inc(&fi->num_locks);
+       dst->fl_u.ceph.inode = igrab(inode);
 }
 
+/*
+ * Do not dereference 'fl->fl_file' in the release function: it
+ * may already have been released by another thread.
+ */
 static void ceph_fl_release_lock(struct file_lock *fl)
 {
-       struct ceph_file_info *fi = fl->fl_file->private_data;
-       struct inode *inode = file_inode(fl->fl_file);
-       struct ceph_inode_info *ci = ceph_inode(inode);
-       atomic_dec(&fi->num_locks);
+       struct inode *inode = fl->fl_u.ceph.inode;
+       struct ceph_inode_info *ci;
+
+       /*
+        * If inode is NULL it should be a request file_lock,
+        * nothing we can do.
+        */
+       if (!inode)
+               return;
+
+       ci = ceph_inode(inode);
        if (atomic_dec_and_test(&ci->i_filelock_ref)) {
                /* clear error when all locks are released */
                spin_lock(&ci->i_ceph_lock);
                ci->i_ceph_flags &= ~CEPH_I_ERROR_FILELOCK;
                spin_unlock(&ci->i_ceph_lock);
        }
+       fl->fl_u.ceph.inode = NULL;
+       iput(inode);
 }
 
 static const struct file_lock_operations ceph_fl_lock_ops = {
index 30bdb391a0dc088d6c72274539386228ebd3cdaa..0ed3be75bb9ae176ac12555e8f6c86a5690519d4 100644 (file)
@@ -790,7 +790,6 @@ struct ceph_file_info {
        struct list_head rw_contexts;
 
        u32 filp_gen;
-       atomic_t num_locks;
 };
 
 struct ceph_dir_file_info {
index 5db73c0f792a5565f6fdca2c445d3d210a76a4de..cbc18b4a9cb20c28959712c1a291ee9c4e085899 100644 (file)
@@ -278,6 +278,7 @@ build_avpair_blob(struct cifs_ses *ses, const struct nls_table *nls_cp)
         * ( for NTLMSSP_AV_NB_DOMAIN_NAME followed by NTLMSSP_AV_EOL ) +
         * unicode length of a netbios domain name
         */
+       kfree_sensitive(ses->auth_key.response);
        ses->auth_key.len = size + 2 * dlen;
        ses->auth_key.response = kzalloc(ses->auth_key.len, GFP_KERNEL);
        if (!ses->auth_key.response) {
index d371259d6808a20a1383b3675b7ed693b0191ccc..b2a04b4e89a5e78cc8e37a8c7126521f290c9ecf 100644 (file)
@@ -2606,11 +2606,14 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb3_fs_context *ctx)
        INIT_LIST_HEAD(&tcon->pending_opens);
        tcon->status = TID_GOOD;
 
-       /* schedule query interfaces poll */
        INIT_DELAYED_WORK(&tcon->query_interfaces,
                          smb2_query_server_interfaces);
-       queue_delayed_work(cifsiod_wq, &tcon->query_interfaces,
-                          (SMB_INTERFACE_POLL_INTERVAL * HZ));
+       if (ses->server->dialect >= SMB30_PROT_ID &&
+           (ses->server->capabilities & SMB2_GLOBAL_CAP_MULTI_CHANNEL)) {
+               /* schedule query interfaces poll */
+               queue_delayed_work(cifsiod_wq, &tcon->query_interfaces,
+                                  (SMB_INTERFACE_POLL_INTERVAL * HZ));
+       }
 
        spin_lock(&cifs_tcp_ses_lock);
        list_add(&tcon->tcon_list, &ses->tcon_list);
index b541e68378f647c2b89d891952bfa0e0362649d2..b64d20374b9c853d4402ef3f8617dbfa57921d94 100644 (file)
@@ -327,8 +327,8 @@ static int update_server_fullpath(struct TCP_Server_Info *server, struct cifs_sb
        return rc;
 }
 
-static int target_share_matches_server(struct TCP_Server_Info *server, const char *tcp_host,
-                                      size_t tcp_host_len, char *share, bool *target_match)
+static int target_share_matches_server(struct TCP_Server_Info *server, char *share,
+                                      bool *target_match)
 {
        int rc = 0;
        const char *dfs_host;
@@ -338,13 +338,16 @@ static int target_share_matches_server(struct TCP_Server_Info *server, const cha
        extract_unc_hostname(share, &dfs_host, &dfs_host_len);
 
        /* Check if hostnames or addresses match */
-       if (dfs_host_len != tcp_host_len || strncasecmp(dfs_host, tcp_host, dfs_host_len) != 0) {
-               cifs_dbg(FYI, "%s: %.*s doesn't match %.*s\n", __func__, (int)dfs_host_len,
-                        dfs_host, (int)tcp_host_len, tcp_host);
+       cifs_server_lock(server);
+       if (dfs_host_len != strlen(server->hostname) ||
+           strncasecmp(dfs_host, server->hostname, dfs_host_len)) {
+               cifs_dbg(FYI, "%s: %.*s doesn't match %s\n", __func__,
+                        (int)dfs_host_len, dfs_host, server->hostname);
                rc = match_target_ip(server, dfs_host, dfs_host_len, target_match);
                if (rc)
                        cifs_dbg(VFS, "%s: failed to match target ip: %d\n", __func__, rc);
        }
+       cifs_server_unlock(server);
        return rc;
 }
 
@@ -358,13 +361,9 @@ static int __tree_connect_dfs_target(const unsigned int xid, struct cifs_tcon *t
        struct cifs_ses *root_ses = CIFS_DFS_ROOT_SES(tcon->ses);
        struct cifs_tcon *ipc = root_ses->tcon_ipc;
        char *share = NULL, *prefix = NULL;
-       const char *tcp_host;
-       size_t tcp_host_len;
        struct dfs_cache_tgt_iterator *tit;
        bool target_match;
 
-       extract_unc_hostname(server->hostname, &tcp_host, &tcp_host_len);
-
        tit = dfs_cache_get_tgt_iterator(tl);
        if (!tit) {
                rc = -ENOENT;
@@ -387,8 +386,7 @@ static int __tree_connect_dfs_target(const unsigned int xid, struct cifs_tcon *t
                        break;
                }
 
-               rc = target_share_matches_server(server, tcp_host, tcp_host_len, share,
-                                                &target_match);
+               rc = target_share_matches_server(server, share, &target_match);
                if (rc)
                        break;
                if (!target_match) {
@@ -401,8 +399,7 @@ static int __tree_connect_dfs_target(const unsigned int xid, struct cifs_tcon *t
                if (ipc->need_reconnect) {
                        scnprintf(tree, MAX_TREE_SIZE, "\\\\%s\\IPC$", server->hostname);
                        rc = ops->tree_connect(xid, ipc->ses, tree, ipc, cifs_sb->local_nls);
-                       if (rc)
-                               break;
+                       cifs_dbg(FYI, "%s: reconnect ipc: %d\n", __func__, rc);
                }
 
                scnprintf(tree, MAX_TREE_SIZE, "\\%s", share);
@@ -498,7 +495,9 @@ int cifs_tree_connect(const unsigned int xid, struct cifs_tcon *tcon, const stru
        }
 
        if (tcon->ipc) {
+               cifs_server_lock(server);
                scnprintf(tree, MAX_TREE_SIZE, "\\\\%s\\IPC$", server->hostname);
+               cifs_server_unlock(server);
                rc = ops->tree_connect(xid, tcon->ses, tree, tcon, nlsc);
                goto out;
        }
index 43ad1176dcb9d643d64eb16424ab7ddacfd917f5..ac86bd0ebd637bc2d1440c3f2858489ed27b5364 100644 (file)
@@ -269,7 +269,7 @@ static int dfscache_proc_show(struct seq_file *m, void *v)
                        list_for_each_entry(t, &ce->tlist, list) {
                                seq_printf(m, "  %s%s\n",
                                           t->name,
-                                          ce->tgthint == t ? " (target hint)" : "");
+                                          READ_ONCE(ce->tgthint) == t ? " (target hint)" : "");
                        }
                }
        }
@@ -321,7 +321,7 @@ static inline void dump_tgts(const struct cache_entry *ce)
        cifs_dbg(FYI, "target list:\n");
        list_for_each_entry(t, &ce->tlist, list) {
                cifs_dbg(FYI, "  %s%s\n", t->name,
-                        ce->tgthint == t ? " (target hint)" : "");
+                        READ_ONCE(ce->tgthint) == t ? " (target hint)" : "");
        }
 }
 
@@ -427,7 +427,7 @@ static int cache_entry_hash(const void *data, int size, unsigned int *hash)
 /* Return target hint of a DFS cache entry */
 static inline char *get_tgt_name(const struct cache_entry *ce)
 {
-       struct cache_dfs_tgt *t = ce->tgthint;
+       struct cache_dfs_tgt *t = READ_ONCE(ce->tgthint);
 
        return t ? t->name : ERR_PTR(-ENOENT);
 }
@@ -470,6 +470,7 @@ static struct cache_dfs_tgt *alloc_target(const char *name, int path_consumed)
 static int copy_ref_data(const struct dfs_info3_param *refs, int numrefs,
                         struct cache_entry *ce, const char *tgthint)
 {
+       struct cache_dfs_tgt *target;
        int i;
 
        ce->ttl = max_t(int, refs[0].ttl, CACHE_MIN_TTL);
@@ -496,8 +497,9 @@ static int copy_ref_data(const struct dfs_info3_param *refs, int numrefs,
                ce->numtgts++;
        }
 
-       ce->tgthint = list_first_entry_or_null(&ce->tlist,
-                                              struct cache_dfs_tgt, list);
+       target = list_first_entry_or_null(&ce->tlist, struct cache_dfs_tgt,
+                                         list);
+       WRITE_ONCE(ce->tgthint, target);
 
        return 0;
 }
@@ -558,7 +560,8 @@ static void remove_oldest_entry_locked(void)
 }
 
 /* Add a new DFS cache entry */
-static int add_cache_entry_locked(struct dfs_info3_param *refs, int numrefs)
+static struct cache_entry *add_cache_entry_locked(struct dfs_info3_param *refs,
+                                                 int numrefs)
 {
        int rc;
        struct cache_entry *ce;
@@ -573,11 +576,11 @@ static int add_cache_entry_locked(struct dfs_info3_param *refs, int numrefs)
 
        rc = cache_entry_hash(refs[0].path_name, strlen(refs[0].path_name), &hash);
        if (rc)
-               return rc;
+               return ERR_PTR(rc);
 
        ce = alloc_cache_entry(refs, numrefs);
        if (IS_ERR(ce))
-               return PTR_ERR(ce);
+               return ce;
 
        spin_lock(&cache_ttl_lock);
        if (!cache_ttl) {
@@ -594,7 +597,7 @@ static int add_cache_entry_locked(struct dfs_info3_param *refs, int numrefs)
 
        atomic_inc(&cache_count);
 
-       return 0;
+       return ce;
 }
 
 /* Check if two DFS paths are equal.  @s1 and @s2 are expected to be in @cache_cp's charset */
@@ -641,7 +644,9 @@ static struct cache_entry *__lookup_cache_entry(const char *path, unsigned int h
  *
  * Use whole path components in the match.  Must be called with htable_rw_lock held.
  *
+ * Return cached entry if successful.
  * Return ERR_PTR(-ENOENT) if the entry is not found.
+ * Return error ptr otherwise.
  */
 static struct cache_entry *lookup_cache_entry(const char *path)
 {
@@ -711,14 +716,15 @@ void dfs_cache_destroy(void)
 static int update_cache_entry_locked(struct cache_entry *ce, const struct dfs_info3_param *refs,
                                     int numrefs)
 {
+       struct cache_dfs_tgt *target;
+       char *th = NULL;
        int rc;
-       char *s, *th = NULL;
 
        WARN_ON(!rwsem_is_locked(&htable_rw_lock));
 
-       if (ce->tgthint) {
-               s = ce->tgthint->name;
-               th = kstrdup(s, GFP_ATOMIC);
+       target = READ_ONCE(ce->tgthint);
+       if (target) {
+               th = kstrdup(target->name, GFP_ATOMIC);
                if (!th)
                        return -ENOMEM;
        }
@@ -767,51 +773,75 @@ static int get_dfs_referral(const unsigned int xid, struct cifs_ses *ses, const
  *
  * For interlinks, cifs_mount() and expand_dfs_referral() are supposed to
  * handle them properly.
+ *
+ * On success, return entry with acquired lock for reading, otherwise error ptr.
  */
-static int cache_refresh_path(const unsigned int xid, struct cifs_ses *ses, const char *path)
+static struct cache_entry *cache_refresh_path(const unsigned int xid,
+                                             struct cifs_ses *ses,
+                                             const char *path,
+                                             bool force_refresh)
 {
-       int rc;
-       struct cache_entry *ce;
        struct dfs_info3_param *refs = NULL;
+       struct cache_entry *ce;
        int numrefs = 0;
-       bool newent = false;
+       int rc;
 
        cifs_dbg(FYI, "%s: search path: %s\n", __func__, path);
 
-       down_write(&htable_rw_lock);
+       down_read(&htable_rw_lock);
 
        ce = lookup_cache_entry(path);
        if (!IS_ERR(ce)) {
-               if (!cache_entry_expired(ce)) {
-                       dump_ce(ce);
-                       up_write(&htable_rw_lock);
-                       return 0;
-               }
-       } else {
-               newent = true;
+               if (!force_refresh && !cache_entry_expired(ce))
+                       return ce;
+       } else if (PTR_ERR(ce) != -ENOENT) {
+               up_read(&htable_rw_lock);
+               return ce;
        }
 
        /*
-        * Either the entry was not found, or it is expired.
+        * Unlock shared access as we don't want to hold any locks while getting
+        * a new referral.  The @ses used for performing the I/O could be
+        * reconnecting and it acquires @htable_rw_lock to look up the dfs cache
+        * in order to failover -- if necessary.
+        */
+       up_read(&htable_rw_lock);
+
+       /*
+        * Either the entry was not found, or it is expired, or it is a forced
+        * refresh.
         * Request a new DFS referral in order to create or update a cache entry.
         */
        rc = get_dfs_referral(xid, ses, path, &refs, &numrefs);
-       if (rc)
-               goto out_unlock;
+       if (rc) {
+               ce = ERR_PTR(rc);
+               goto out;
+       }
 
        dump_refs(refs, numrefs);
 
-       if (!newent) {
-               rc = update_cache_entry_locked(ce, refs, numrefs);
-               goto out_unlock;
+       down_write(&htable_rw_lock);
+       /* Re-check as another task might have it added or refreshed already */
+       ce = lookup_cache_entry(path);
+       if (!IS_ERR(ce)) {
+               if (force_refresh || cache_entry_expired(ce)) {
+                       rc = update_cache_entry_locked(ce, refs, numrefs);
+                       if (rc)
+                               ce = ERR_PTR(rc);
+               }
+       } else if (PTR_ERR(ce) == -ENOENT) {
+               ce = add_cache_entry_locked(refs, numrefs);
        }
 
-       rc = add_cache_entry_locked(refs, numrefs);
+       if (IS_ERR(ce)) {
+               up_write(&htable_rw_lock);
+               goto out;
+       }
 
-out_unlock:
-       up_write(&htable_rw_lock);
+       downgrade_write(&htable_rw_lock);
+out:
        free_dfs_info_array(refs, numrefs);
-       return rc;
+       return ce;
 }
 
 /*
@@ -878,7 +908,7 @@ static int get_targets(struct cache_entry *ce, struct dfs_cache_tgt_list *tl)
                }
                it->it_path_consumed = t->path_consumed;
 
-               if (ce->tgthint == t)
+               if (READ_ONCE(ce->tgthint) == t)
                        list_add(&it->it_list, head);
                else
                        list_add_tail(&it->it_list, head);
@@ -931,15 +961,8 @@ int dfs_cache_find(const unsigned int xid, struct cifs_ses *ses, const struct nl
        if (IS_ERR(npath))
                return PTR_ERR(npath);
 
-       rc = cache_refresh_path(xid, ses, npath);
-       if (rc)
-               goto out_free_path;
-
-       down_read(&htable_rw_lock);
-
-       ce = lookup_cache_entry(npath);
+       ce = cache_refresh_path(xid, ses, npath, false);
        if (IS_ERR(ce)) {
-               up_read(&htable_rw_lock);
                rc = PTR_ERR(ce);
                goto out_free_path;
        }
@@ -1002,72 +1025,6 @@ out_unlock:
        return rc;
 }
 
-/**
- * dfs_cache_update_tgthint - update target hint of a DFS cache entry
- *
- * If it doesn't find the cache entry, then it will get a DFS referral for @path
- * and create a new entry.
- *
- * In case the cache entry exists but expired, it will get a DFS referral
- * for @path and then update the respective cache entry.
- *
- * @xid: syscall id
- * @ses: smb session
- * @cp: codepage
- * @remap: type of character remapping for paths
- * @path: path to lookup in DFS referral cache
- * @it: DFS target iterator
- *
- * Return zero if the target hint was updated successfully, otherwise non-zero.
- */
-int dfs_cache_update_tgthint(const unsigned int xid, struct cifs_ses *ses,
-                            const struct nls_table *cp, int remap, const char *path,
-                            const struct dfs_cache_tgt_iterator *it)
-{
-       int rc;
-       const char *npath;
-       struct cache_entry *ce;
-       struct cache_dfs_tgt *t;
-
-       npath = dfs_cache_canonical_path(path, cp, remap);
-       if (IS_ERR(npath))
-               return PTR_ERR(npath);
-
-       cifs_dbg(FYI, "%s: update target hint - path: %s\n", __func__, npath);
-
-       rc = cache_refresh_path(xid, ses, npath);
-       if (rc)
-               goto out_free_path;
-
-       down_write(&htable_rw_lock);
-
-       ce = lookup_cache_entry(npath);
-       if (IS_ERR(ce)) {
-               rc = PTR_ERR(ce);
-               goto out_unlock;
-       }
-
-       t = ce->tgthint;
-
-       if (likely(!strcasecmp(it->it_name, t->name)))
-               goto out_unlock;
-
-       list_for_each_entry(t, &ce->tlist, list) {
-               if (!strcasecmp(t->name, it->it_name)) {
-                       ce->tgthint = t;
-                       cifs_dbg(FYI, "%s: new target hint: %s\n", __func__,
-                                it->it_name);
-                       break;
-               }
-       }
-
-out_unlock:
-       up_write(&htable_rw_lock);
-out_free_path:
-       kfree(npath);
-       return rc;
-}
-
 /**
  * dfs_cache_noreq_update_tgthint - update target hint of a DFS cache entry
  * without sending any requests to the currently connected server.
@@ -1092,21 +1049,20 @@ void dfs_cache_noreq_update_tgthint(const char *path, const struct dfs_cache_tgt
 
        cifs_dbg(FYI, "%s: path: %s\n", __func__, path);
 
-       if (!down_write_trylock(&htable_rw_lock))
-               return;
+       down_read(&htable_rw_lock);
 
        ce = lookup_cache_entry(path);
        if (IS_ERR(ce))
                goto out_unlock;
 
-       t = ce->tgthint;
+       t = READ_ONCE(ce->tgthint);
 
        if (unlikely(!strcasecmp(it->it_name, t->name)))
                goto out_unlock;
 
        list_for_each_entry(t, &ce->tlist, list) {
                if (!strcasecmp(t->name, it->it_name)) {
-                       ce->tgthint = t;
+                       WRITE_ONCE(ce->tgthint, t);
                        cifs_dbg(FYI, "%s: new target hint: %s\n", __func__,
                                 it->it_name);
                        break;
@@ -1114,7 +1070,7 @@ void dfs_cache_noreq_update_tgthint(const char *path, const struct dfs_cache_tgt
        }
 
 out_unlock:
-       up_write(&htable_rw_lock);
+       up_read(&htable_rw_lock);
 }
 
 /**
@@ -1299,7 +1255,6 @@ static bool target_share_equal(struct TCP_Server_Info *server, const char *s1, c
         * Resolve share's hostname and check if server address matches.  Otherwise just ignore it
         * as we could not have upcall to resolve hostname or failed to convert ip address.
         */
-       match = true;
        extract_unc_hostname(s1, &host, &hostlen);
        scnprintf(unc, sizeof(unc), "\\\\%.*s", (int)hostlen, host);
 
@@ -1321,35 +1276,37 @@ static bool target_share_equal(struct TCP_Server_Info *server, const char *s1, c
  * Mark dfs tcon for reconnecting when the currently connected tcon does not match any of the new
  * target shares in @refs.
  */
-static void mark_for_reconnect_if_needed(struct cifs_tcon *tcon, struct dfs_cache_tgt_list *tl,
-                                        const struct dfs_info3_param *refs, int numrefs)
+static void mark_for_reconnect_if_needed(struct TCP_Server_Info *server,
+                                        struct dfs_cache_tgt_list *old_tl,
+                                        struct dfs_cache_tgt_list *new_tl)
 {
-       struct dfs_cache_tgt_iterator *it;
-       int i;
-
-       for (it = dfs_cache_get_tgt_iterator(tl); it; it = dfs_cache_get_next_tgt(tl, it)) {
-               for (i = 0; i < numrefs; i++) {
-                       if (target_share_equal(tcon->ses->server, dfs_cache_get_tgt_name(it),
-                                              refs[i].node_name))
+       struct dfs_cache_tgt_iterator *oit, *nit;
+
+       for (oit = dfs_cache_get_tgt_iterator(old_tl); oit;
+            oit = dfs_cache_get_next_tgt(old_tl, oit)) {
+               for (nit = dfs_cache_get_tgt_iterator(new_tl); nit;
+                    nit = dfs_cache_get_next_tgt(new_tl, nit)) {
+                       if (target_share_equal(server,
+                                              dfs_cache_get_tgt_name(oit),
+                                              dfs_cache_get_tgt_name(nit)))
                                return;
                }
        }
 
        cifs_dbg(FYI, "%s: no cached or matched targets. mark dfs share for reconnect.\n", __func__);
-       cifs_signal_cifsd_for_reconnect(tcon->ses->server, true);
+       cifs_signal_cifsd_for_reconnect(server, true);
 }
 
 /* Refresh dfs referral of tcon and mark it for reconnect if needed */
 static int __refresh_tcon(const char *path, struct cifs_tcon *tcon, bool force_refresh)
 {
-       struct dfs_cache_tgt_list tl = DFS_CACHE_TGT_LIST_INIT(tl);
+       struct dfs_cache_tgt_list old_tl = DFS_CACHE_TGT_LIST_INIT(old_tl);
+       struct dfs_cache_tgt_list new_tl = DFS_CACHE_TGT_LIST_INIT(new_tl);
        struct cifs_ses *ses = CIFS_DFS_ROOT_SES(tcon->ses);
        struct cifs_tcon *ipc = ses->tcon_ipc;
-       struct dfs_info3_param *refs = NULL;
        bool needs_refresh = false;
        struct cache_entry *ce;
        unsigned int xid;
-       int numrefs = 0;
        int rc = 0;
 
        xid = get_xid();
@@ -1358,9 +1315,8 @@ static int __refresh_tcon(const char *path, struct cifs_tcon *tcon, bool force_r
        ce = lookup_cache_entry(path);
        needs_refresh = force_refresh || IS_ERR(ce) || cache_entry_expired(ce);
        if (!IS_ERR(ce)) {
-               rc = get_targets(ce, &tl);
-               if (rc)
-                       cifs_dbg(FYI, "%s: could not get dfs targets: %d\n", __func__, rc);
+               rc = get_targets(ce, &old_tl);
+               cifs_dbg(FYI, "%s: get_targets: %d\n", __func__, rc);
        }
        up_read(&htable_rw_lock);
 
@@ -1377,26 +1333,18 @@ static int __refresh_tcon(const char *path, struct cifs_tcon *tcon, bool force_r
        }
        spin_unlock(&ipc->tc_lock);
 
-       rc = get_dfs_referral(xid, ses, path, &refs, &numrefs);
-       if (!rc) {
-               /* Create or update a cache entry with the new referral */
-               dump_refs(refs, numrefs);
-
-               down_write(&htable_rw_lock);
-               ce = lookup_cache_entry(path);
-               if (IS_ERR(ce))
-                       add_cache_entry_locked(refs, numrefs);
-               else if (force_refresh || cache_entry_expired(ce))
-                       update_cache_entry_locked(ce, refs, numrefs);
-               up_write(&htable_rw_lock);
-
-               mark_for_reconnect_if_needed(tcon, &tl, refs, numrefs);
+       ce = cache_refresh_path(xid, ses, path, true);
+       if (!IS_ERR(ce)) {
+               rc = get_targets(ce, &new_tl);
+               up_read(&htable_rw_lock);
+               cifs_dbg(FYI, "%s: get_targets: %d\n", __func__, rc);
+               mark_for_reconnect_if_needed(tcon->ses->server, &old_tl, &new_tl);
        }
 
 out:
        free_xid(xid);
-       dfs_cache_free_tgts(&tl);
-       free_dfs_info_array(refs, numrefs);
+       dfs_cache_free_tgts(&old_tl);
+       dfs_cache_free_tgts(&new_tl);
        return rc;
 }
 
index f7cff0be932747af2d4261272bae880d60ba6f00..be3b5a44cf82711a575e8efe70934f7e07109131 100644 (file)
@@ -35,9 +35,6 @@ int dfs_cache_find(const unsigned int xid, struct cifs_ses *ses, const struct nl
                   struct dfs_cache_tgt_list *tgt_list);
 int dfs_cache_noreq_find(const char *path, struct dfs_info3_param *ref,
                         struct dfs_cache_tgt_list *tgt_list);
-int dfs_cache_update_tgthint(const unsigned int xid, struct cifs_ses *ses,
-                            const struct nls_table *cp, int remap, const char *path,
-                            const struct dfs_cache_tgt_iterator *it);
 void dfs_cache_noreq_update_tgthint(const char *path, const struct dfs_cache_tgt_iterator *it);
 int dfs_cache_get_tgt_referral(const char *path, const struct dfs_cache_tgt_iterator *it,
                               struct dfs_info3_param *ref);
index bd374feeccaa19f021fdd03a4adf8d8e31f7d9fe..a5a097a6998371f12e3c87820d186985cbe1b3f8 100644 (file)
@@ -428,6 +428,7 @@ smb3_create_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
        oparms.disposition = FILE_CREATE;
        oparms.fid = &fid;
        oparms.reconnect = false;
+       oparms.mode = 0644;
 
        rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL,
                       NULL, NULL);
index 4d3c586785a59d65f54ad32f2a8b5ba7df47c08d..2a19c7987c5bd8aafbf2840ed8749979d99701ad 100644 (file)
@@ -1277,7 +1277,9 @@ int match_target_ip(struct TCP_Server_Info *server,
        if (rc < 0)
                return rc;
 
+       spin_lock(&server->srv_lock);
        *result = cifs_match_ipaddr((struct sockaddr *)&server->dstaddr, (struct sockaddr *)&ss);
+       spin_unlock(&server->srv_lock);
        cifs_dbg(FYI, "%s: ip addresses match: %u\n", __func__, *result);
        return 0;
 }
index 9e7d9f0baa18a133eb1afd76a4916d7f47d00dd2..c47b254f0d1e276a312f0bcca25e3c1cbf09bd9c 100644 (file)
@@ -292,9 +292,10 @@ cifs_chan_update_iface(struct cifs_ses *ses, struct TCP_Server_Info *server)
                        continue;
                }
                kref_get(&iface->refcount);
+               break;
        }
 
-       if (!list_entry_is_head(iface, &ses->iface_list, iface_head)) {
+       if (list_entry_is_head(iface, &ses->iface_list, iface_head)) {
                rc = 1;
                iface = NULL;
                cifs_dbg(FYI, "unable to find a suitable iface\n");
@@ -814,6 +815,7 @@ int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len,
                return -EINVAL;
        }
        if (tilen) {
+               kfree_sensitive(ses->auth_key.response);
                ses->auth_key.response = kmemdup(bcc_ptr + tioffset, tilen,
                                                 GFP_KERNEL);
                if (!ses->auth_key.response) {
@@ -1427,6 +1429,7 @@ sess_auth_kerberos(struct sess_data *sess_data)
                goto out_put_spnego_key;
        }
 
+       kfree_sensitive(ses->auth_key.response);
        ses->auth_key.response = kmemdup(msg->data, msg->sesskey_len,
                                         GFP_KERNEL);
        if (!ses->auth_key.response) {
index 50480751e521ca752e784bf1f4190f33ca9a8e67..4cb364454e13094fb033f13c3ae34312209e317e 100644 (file)
@@ -562,17 +562,20 @@ static int cifs_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
        if ((rc == -EOPNOTSUPP) || (rc == -EINVAL)) {
                rc = SMBQueryInformation(xid, tcon, full_path, &fi, cifs_sb->local_nls,
                                         cifs_remap(cifs_sb));
-               if (!rc)
-                       move_cifs_info_to_smb2(&data->fi, &fi);
                *adjustTZ = true;
        }
 
-       if (!rc && (le32_to_cpu(fi.Attributes) & ATTR_REPARSE)) {
+       if (!rc) {
                int tmprc;
                int oplock = 0;
                struct cifs_fid fid;
                struct cifs_open_parms oparms;
 
+               move_cifs_info_to_smb2(&data->fi, &fi);
+
+               if (!(le32_to_cpu(fi.Attributes) & ATTR_REPARSE))
+                       return 0;
+
                oparms.tcon = tcon;
                oparms.cifs_sb = cifs_sb;
                oparms.desired_access = FILE_READ_ATTRIBUTES;
@@ -716,17 +719,25 @@ cifs_mkdir_setinfo(struct inode *inode, const char *full_path,
 static int cifs_open_file(const unsigned int xid, struct cifs_open_parms *oparms, __u32 *oplock,
                          void *buf)
 {
-       FILE_ALL_INFO *fi = buf;
+       struct cifs_open_info_data *data = buf;
+       FILE_ALL_INFO fi = {};
+       int rc;
 
        if (!(oparms->tcon->ses->capabilities & CAP_NT_SMBS))
-               return SMBLegacyOpen(xid, oparms->tcon, oparms->path,
-                                    oparms->disposition,
-                                    oparms->desired_access,
-                                    oparms->create_options,
-                                    &oparms->fid->netfid, oplock, fi,
-                                    oparms->cifs_sb->local_nls,
-                                    cifs_remap(oparms->cifs_sb));
-       return CIFS_open(xid, oparms, oplock, fi);
+               rc = SMBLegacyOpen(xid, oparms->tcon, oparms->path,
+                                  oparms->disposition,
+                                  oparms->desired_access,
+                                  oparms->create_options,
+                                  &oparms->fid->netfid, oplock, &fi,
+                                  oparms->cifs_sb->local_nls,
+                                  cifs_remap(oparms->cifs_sb));
+       else
+               rc = CIFS_open(xid, oparms, oplock, &fi);
+
+       if (!rc && data)
+               move_cifs_info_to_smb2(&data->fi, &fi);
+
+       return rc;
 }
 
 static void
@@ -1050,7 +1061,7 @@ cifs_make_node(unsigned int xid, struct inode *inode,
        struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
        struct inode *newinode = NULL;
        int rc = -EPERM;
-       FILE_ALL_INFO *buf = NULL;
+       struct cifs_open_info_data buf = {};
        struct cifs_io_parms io_parms;
        __u32 oplock = 0;
        struct cifs_fid fid;
@@ -1082,14 +1093,14 @@ cifs_make_node(unsigned int xid, struct inode *inode,
                                            cifs_sb->local_nls,
                                            cifs_remap(cifs_sb));
                if (rc)
-                       goto out;
+                       return rc;
 
                rc = cifs_get_inode_info_unix(&newinode, full_path,
                                              inode->i_sb, xid);
 
                if (rc == 0)
                        d_instantiate(dentry, newinode);
-               goto out;
+               return rc;
        }
 
        /*
@@ -1097,19 +1108,13 @@ cifs_make_node(unsigned int xid, struct inode *inode,
         * support block and char device (no socket & fifo)
         */
        if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL))
-               goto out;
+               return rc;
 
        if (!S_ISCHR(mode) && !S_ISBLK(mode))
-               goto out;
+               return rc;
 
        cifs_dbg(FYI, "sfu compat create special file\n");
 
-       buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
-       if (buf == NULL) {
-               rc = -ENOMEM;
-               goto out;
-       }
-
        oparms.tcon = tcon;
        oparms.cifs_sb = cifs_sb;
        oparms.desired_access = GENERIC_WRITE;
@@ -1124,21 +1129,21 @@ cifs_make_node(unsigned int xid, struct inode *inode,
                oplock = REQ_OPLOCK;
        else
                oplock = 0;
-       rc = tcon->ses->server->ops->open(xid, &oparms, &oplock, buf);
+       rc = tcon->ses->server->ops->open(xid, &oparms, &oplock, &buf);
        if (rc)
-               goto out;
+               return rc;
 
        /*
         * BB Do not bother to decode buf since no local inode yet to put
         * timestamps in, but we can reuse it safely.
         */
 
-       pdev = (struct win_dev *)buf;
+       pdev = (struct win_dev *)&buf.fi;
        io_parms.pid = current->tgid;
        io_parms.tcon = tcon;
        io_parms.offset = 0;
        io_parms.length = sizeof(struct win_dev);
-       iov[1].iov_base = buf;
+       iov[1].iov_base = &buf.fi;
        iov[1].iov_len = sizeof(struct win_dev);
        if (S_ISCHR(mode)) {
                memcpy(pdev->type, "IntxCHR", 8);
@@ -1157,8 +1162,8 @@ cifs_make_node(unsigned int xid, struct inode *inode,
        d_drop(dentry);
 
        /* FIXME: add code here to set EAs */
-out:
-       kfree(buf);
+
+       cifs_free_open_info(&buf);
        return rc;
 }
 
index dc160de7a6de4deeebe9616c7b25e8d9bbff52de..e6bcd2baf446a93a25ca3613029a960edd5137b9 100644 (file)
@@ -530,7 +530,6 @@ parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf,
        p = buf;
 
        spin_lock(&ses->iface_lock);
-       ses->iface_count = 0;
        /*
         * Go through iface_list and do kref_put to remove
         * any unused ifaces. ifaces in use will be removed
@@ -540,6 +539,7 @@ parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf,
                                 iface_head) {
                iface->is_active = 0;
                kref_put(&iface->refcount, release_iface);
+               ses->iface_count--;
        }
        spin_unlock(&ses->iface_lock);
 
@@ -618,6 +618,7 @@ parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf,
                                /* just get a ref so that it doesn't get picked/freed */
                                iface->is_active = 1;
                                kref_get(&iface->refcount);
+                               ses->iface_count++;
                                spin_unlock(&ses->iface_lock);
                                goto next_iface;
                        } else if (ret < 0) {
@@ -4488,17 +4489,12 @@ smb3_init_transform_rq(struct TCP_Server_Info *server, int num_rqst,
 
                /* copy pages form the old */
                for (j = 0; j < npages; j++) {
-                       char *dst, *src;
                        unsigned int offset, len;
 
                        rqst_page_get_length(new, j, &len, &offset);
 
-                       dst = kmap_local_page(new->rq_pages[j]) + offset;
-                       src = kmap_local_page(old->rq_pages[j]) + offset;
-
-                       memcpy(dst, src, len);
-                       kunmap(new->rq_pages[j]);
-                       kunmap(old->rq_pages[j]);
+                       memcpy_page(new->rq_pages[j], offset,
+                                   old->rq_pages[j], offset, len);
                }
        }
 
index a5695748a89b17aaf60ef3c454ed64273fccd9a7..2c9ffa921e6f68db581c9ee8d4dc316d5e15ba45 100644 (file)
@@ -541,9 +541,10 @@ static void
 assemble_neg_contexts(struct smb2_negotiate_req *req,
                      struct TCP_Server_Info *server, unsigned int *total_len)
 {
-       char *pneg_ctxt;
-       char *hostname = NULL;
        unsigned int ctxt_len, neg_context_count;
+       struct TCP_Server_Info *pserver;
+       char *pneg_ctxt;
+       char *hostname;
 
        if (*total_len > 200) {
                /* In case length corrupted don't want to overrun smb buffer */
@@ -574,8 +575,9 @@ assemble_neg_contexts(struct smb2_negotiate_req *req,
         * secondary channels don't have the hostname field populated
         * use the hostname field in the primary channel instead
         */
-       hostname = CIFS_SERVER_IS_CHAN(server) ?
-               server->primary_server->hostname : server->hostname;
+       pserver = CIFS_SERVER_IS_CHAN(server) ? server->primary_server : server;
+       cifs_server_lock(pserver);
+       hostname = pserver->hostname;
        if (hostname && (hostname[0] != 0)) {
                ctxt_len = build_netname_ctxt((struct smb2_netname_neg_context *)pneg_ctxt,
                                              hostname);
@@ -584,6 +586,7 @@ assemble_neg_contexts(struct smb2_negotiate_req *req,
                neg_context_count = 3;
        } else
                neg_context_count = 2;
+       cifs_server_unlock(pserver);
 
        build_posix_ctxt((struct smb2_posix_neg_context *)pneg_ctxt);
        *total_len += sizeof(struct smb2_posix_neg_context);
@@ -1450,6 +1453,7 @@ SMB2_auth_kerberos(struct SMB2_sess_data *sess_data)
 
        /* keep session key if binding */
        if (!is_binding) {
+               kfree_sensitive(ses->auth_key.response);
                ses->auth_key.response = kmemdup(msg->data, msg->sesskey_len,
                                                 GFP_KERNEL);
                if (!ses->auth_key.response) {
@@ -1479,8 +1483,11 @@ SMB2_auth_kerberos(struct SMB2_sess_data *sess_data)
 out_put_spnego_key:
        key_invalidate(spnego_key);
        key_put(spnego_key);
-       if (rc)
+       if (rc) {
                kfree_sensitive(ses->auth_key.response);
+               ses->auth_key.response = NULL;
+               ses->auth_key.len = 0;
+       }
 out:
        sess_data->result = rc;
        sess_data->func = NULL;
@@ -4156,12 +4163,15 @@ smb2_readv_callback(struct mid_q_entry *mid)
                                (struct smb2_hdr *)rdata->iov[0].iov_base;
        struct cifs_credits credits = { .value = 0, .instance = 0 };
        struct smb_rqst rqst = { .rq_iov = &rdata->iov[1],
-                                .rq_nvec = 1,
-                                .rq_pages = rdata->pages,
-                                .rq_offset = rdata->page_offset,
-                                .rq_npages = rdata->nr_pages,
-                                .rq_pagesz = rdata->pagesz,
-                                .rq_tailsz = rdata->tailsz };
+                                .rq_nvec = 1, };
+
+       if (rdata->got_bytes) {
+               rqst.rq_pages = rdata->pages;
+               rqst.rq_offset = rdata->page_offset;
+               rqst.rq_npages = rdata->nr_pages;
+               rqst.rq_pagesz = rdata->pagesz;
+               rqst.rq_tailsz = rdata->tailsz;
+       }
 
        WARN_ONCE(rdata->server != mid->server,
                  "rdata server %p != mid server %p",
index 481788c24a68ba704bd21804f88333d5c84afe45..626a615dafc2ffa1604f155404165f76288478e1 100644 (file)
@@ -577,26 +577,25 @@ static int erofs_fc_parse_param(struct fs_context *fc,
                }
                ++ctx->devs->extra_devices;
                break;
-       case Opt_fsid:
 #ifdef CONFIG_EROFS_FS_ONDEMAND
+       case Opt_fsid:
                kfree(ctx->fsid);
                ctx->fsid = kstrdup(param->string, GFP_KERNEL);
                if (!ctx->fsid)
                        return -ENOMEM;
-#else
-               errorfc(fc, "fsid option not supported");
-#endif
                break;
        case Opt_domain_id:
-#ifdef CONFIG_EROFS_FS_ONDEMAND
                kfree(ctx->domain_id);
                ctx->domain_id = kstrdup(param->string, GFP_KERNEL);
                if (!ctx->domain_id)
                        return -ENOMEM;
+               break;
 #else
-               errorfc(fc, "domain_id option not supported");
-#endif
+       case Opt_fsid:
+       case Opt_domain_id:
+               errorfc(fc, "%s option not supported", erofs_fs_parameters[opt].name);
                break;
+#endif
        default:
                return -ENOPARAM;
        }
index ccf7c55d477fe5a47c7b3f05475faa490547c28e..5200bb86e2643c3b5432926823f610a46531ef53 100644 (file)
@@ -1032,12 +1032,12 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be,
 
        if (!be->decompressed_pages)
                be->decompressed_pages =
-                       kvcalloc(be->nr_pages, sizeof(struct page *),
-                                GFP_KERNEL | __GFP_NOFAIL);
+                       kcalloc(be->nr_pages, sizeof(struct page *),
+                               GFP_KERNEL | __GFP_NOFAIL);
        if (!be->compressed_pages)
                be->compressed_pages =
-                       kvcalloc(pclusterpages, sizeof(struct page *),
-                                GFP_KERNEL | __GFP_NOFAIL);
+                       kcalloc(pclusterpages, sizeof(struct page *),
+                               GFP_KERNEL | __GFP_NOFAIL);
 
        z_erofs_parse_out_bvecs(be);
        err2 = z_erofs_parse_in_bvecs(be, &overlapped);
@@ -1085,7 +1085,7 @@ out:
        }
        if (be->compressed_pages < be->onstack_pages ||
            be->compressed_pages >= be->onstack_pages + Z_EROFS_ONSTACK_PAGES)
-               kvfree(be->compressed_pages);
+               kfree(be->compressed_pages);
        z_erofs_fill_other_copies(be, err);
 
        for (i = 0; i < be->nr_pages; ++i) {
@@ -1104,7 +1104,7 @@ out:
        }
 
        if (be->decompressed_pages != be->onstack_pages)
-               kvfree(be->decompressed_pages);
+               kfree(be->decompressed_pages);
 
        pcl->length = 0;
        pcl->partial = true;
index 0150570c33aae1bdb3d3670d592572037bc82218..98fb90b9af715c57112ee21446075218cbe37368 100644 (file)
@@ -793,12 +793,16 @@ static int z_erofs_iomap_begin_report(struct inode *inode, loff_t offset,
                iomap->type = IOMAP_HOLE;
                iomap->addr = IOMAP_NULL_ADDR;
                /*
-                * No strict rule how to describe extents for post EOF, yet
-                * we need do like below. Otherwise, iomap itself will get
+                * No strict rule on how to describe extents for post EOF, yet
+                * we need to do like below. Otherwise, iomap itself will get
                 * into an endless loop on post EOF.
+                *
+                * Calculate the effective offset by subtracting extent start
+                * (map.m_la) from the requested offset, and add it to length.
+                * (NB: offset >= map.m_la always)
                 */
                if (iomap->offset >= inode->i_size)
-                       iomap->length = length + map.m_la - offset;
+                       iomap->length = length + offset - map.m_la;
        }
        iomap->flags = 0;
        return 0;
index 7decaaf27e82bd9e090ff1ee99687719105b08a4..69a1b8c6a2ecae6e720fa1e903d6ac59ade6754f 100644 (file)
@@ -81,6 +81,8 @@ ext4_xattr_block_cache_find(struct inode *, struct ext4_xattr_header *,
                            struct mb_cache_entry **);
 static __le32 ext4_xattr_hash_entry(char *name, size_t name_len, __le32 *value,
                                    size_t value_count);
+static __le32 ext4_xattr_hash_entry_signed(char *name, size_t name_len, __le32 *value,
+                                   size_t value_count);
 static void ext4_xattr_rehash(struct ext4_xattr_header *);
 
 static const struct xattr_handler * const ext4_xattr_handler_map[] = {
@@ -470,8 +472,21 @@ ext4_xattr_inode_verify_hashes(struct inode *ea_inode,
                tmp_data = cpu_to_le32(hash);
                e_hash = ext4_xattr_hash_entry(entry->e_name, entry->e_name_len,
                                               &tmp_data, 1);
-               if (e_hash != entry->e_hash)
-                       return -EFSCORRUPTED;
+               /* All good? */
+               if (e_hash == entry->e_hash)
+                       return 0;
+
+               /*
+                * Not good. Maybe the entry hash was calculated
+                * using the buggy signed char version?
+                */
+               e_hash = ext4_xattr_hash_entry_signed(entry->e_name, entry->e_name_len,
+                                                       &tmp_data, 1);
+               if (e_hash == entry->e_hash)
+                       return 0;
+
+               /* Still no match - bad */
+               return -EFSCORRUPTED;
        }
        return 0;
 }
@@ -3091,6 +3106,28 @@ static __le32 ext4_xattr_hash_entry(char *name, size_t name_len, __le32 *value,
        return cpu_to_le32(hash);
 }
 
+/*
+ * ext4_xattr_hash_entry_signed()
+ *
+ * Compute the hash of an extended attribute incorrectly.
+ */
+static __le32 ext4_xattr_hash_entry_signed(char *name, size_t name_len, __le32 *value, size_t value_count)
+{
+       __u32 hash = 0;
+
+       while (name_len--) {
+               hash = (hash << NAME_HASH_SHIFT) ^
+                      (hash >> (8*sizeof(hash) - NAME_HASH_SHIFT)) ^
+                      (signed char)*name++;
+       }
+       while (value_count--) {
+               hash = (hash << VALUE_HASH_SHIFT) ^
+                      (hash >> (8*sizeof(hash) - VALUE_HASH_SHIFT)) ^
+                      le32_to_cpu(*value++);
+       }
+       return cpu_to_le32(hash);
+}
+
 #undef NAME_HASH_SHIFT
 #undef VALUE_HASH_SHIFT
 
index 6e43e19c7d1cacd95f1dec99c9e5292795e1d34a..97e816590cd95ad7f4fa93a41abe6c51bb6f9a16 100644 (file)
@@ -2183,7 +2183,7 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
        sector_t last_block_in_file;
        const unsigned blocksize = blks_to_bytes(inode, 1);
        struct decompress_io_ctx *dic = NULL;
-       struct extent_info ei = {0, };
+       struct extent_info ei = {};
        bool from_dnode = true;
        int i;
        int ret = 0;
index 1bd38a78ebba938ca1c66b088722893411ab6dc4..342af24b2f8cf7d8237ff05b0db6cd92e51f5e8c 100644 (file)
@@ -546,7 +546,8 @@ static bool __lookup_extent_tree(struct inode *inode, pgoff_t pgofs,
        struct extent_node *en;
        bool ret = false;
 
-       f2fs_bug_on(sbi, !et);
+       if (!et)
+               return false;
 
        trace_f2fs_lookup_extent_tree_start(inode, pgofs, type);
 
@@ -881,12 +882,14 @@ static unsigned long long __calculate_block_age(unsigned long long new,
 }
 
 /* This returns a new age and allocated blocks in ei */
-static int __get_new_block_age(struct inode *inode, struct extent_info *ei)
+static int __get_new_block_age(struct inode *inode, struct extent_info *ei,
+                                               block_t blkaddr)
 {
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        loff_t f_size = i_size_read(inode);
        unsigned long long cur_blocks =
                                atomic64_read(&sbi->allocated_data_blocks);
+       struct extent_info tei = *ei;   /* only fofs and len are valid */
 
        /*
         * When I/O is not aligned to a PAGE_SIZE, update will happen to the last
@@ -894,20 +897,20 @@ static int __get_new_block_age(struct inode *inode, struct extent_info *ei)
         * block here.
         */
        if ((f_size >> PAGE_SHIFT) == ei->fofs && f_size & (PAGE_SIZE - 1) &&
-                       ei->blk == NEW_ADDR)
+                       blkaddr == NEW_ADDR)
                return -EINVAL;
 
-       if (__lookup_extent_tree(inode, ei->fofs, ei, EX_BLOCK_AGE)) {
+       if (__lookup_extent_tree(inode, ei->fofs, &tei, EX_BLOCK_AGE)) {
                unsigned long long cur_age;
 
-               if (cur_blocks >= ei->last_blocks)
-                       cur_age = cur_blocks - ei->last_blocks;
+               if (cur_blocks >= tei.last_blocks)
+                       cur_age = cur_blocks - tei.last_blocks;
                else
                        /* allocated_data_blocks overflow */
-                       cur_age = ULLONG_MAX - ei->last_blocks + cur_blocks;
+                       cur_age = ULLONG_MAX - tei.last_blocks + cur_blocks;
 
-               if (ei->age)
-                       ei->age = __calculate_block_age(cur_age, ei->age);
+               if (tei.age)
+                       ei->age = __calculate_block_age(cur_age, tei.age);
                else
                        ei->age = cur_age;
                ei->last_blocks = cur_blocks;
@@ -915,14 +918,14 @@ static int __get_new_block_age(struct inode *inode, struct extent_info *ei)
                return 0;
        }
 
-       f2fs_bug_on(sbi, ei->blk == NULL_ADDR);
+       f2fs_bug_on(sbi, blkaddr == NULL_ADDR);
 
        /* the data block was allocated for the first time */
-       if (ei->blk == NEW_ADDR)
+       if (blkaddr == NEW_ADDR)
                goto out;
 
-       if (__is_valid_data_blkaddr(ei->blk) &&
-                       !f2fs_is_valid_blkaddr(sbi, ei->blk, DATA_GENERIC_ENHANCE)) {
+       if (__is_valid_data_blkaddr(blkaddr) &&
+           !f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE)) {
                f2fs_bug_on(sbi, 1);
                return -EINVAL;
        }
@@ -938,7 +941,7 @@ out:
 
 static void __update_extent_cache(struct dnode_of_data *dn, enum extent_type type)
 {
-       struct extent_info ei;
+       struct extent_info ei = {};
 
        if (!__may_extent_tree(dn->inode, type))
                return;
@@ -953,8 +956,7 @@ static void __update_extent_cache(struct dnode_of_data *dn, enum extent_type typ
                else
                        ei.blk = dn->data_blkaddr;
        } else if (type == EX_BLOCK_AGE) {
-               ei.blk = dn->data_blkaddr;
-               if (__get_new_block_age(dn->inode, &ei))
+               if (__get_new_block_age(dn->inode, &ei, dn->data_blkaddr))
                        return;
        }
        __update_extent_tree_range(dn->inode, &ei, type);
index a6c4012798860098bfab8010a8f032df6a59bcad..ecbc8c135b494d9c8c87ed361b1c2ad704b914ff 100644 (file)
@@ -2559,7 +2559,7 @@ static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
        struct f2fs_map_blocks map = { .m_next_extent = NULL,
                                        .m_seg_type = NO_CHECK_TYPE,
                                        .m_may_create = false };
-       struct extent_info ei = {0, };
+       struct extent_info ei = {};
        pgoff_t pg_start, pg_end, next_pgofs;
        unsigned int blk_per_seg = sbi->blocks_per_seg;
        unsigned int total = 0, sec_num;
index 25ddea478fc1a8a628bf4e59d10584836a96052e..ae3c4e5474efa2d2869bddbb03da4b8da42bc8b4 100644 (file)
@@ -663,8 +663,7 @@ init_thread:
        if (IS_ERR(fcc->f2fs_issue_flush)) {
                int err = PTR_ERR(fcc->f2fs_issue_flush);
 
-               kfree(fcc);
-               SM_I(sbi)->fcc_info = NULL;
+               fcc->f2fs_issue_flush = NULL;
                return err;
        }
 
@@ -3161,7 +3160,7 @@ static int __get_segment_type_4(struct f2fs_io_info *fio)
 static int __get_age_segment_type(struct inode *inode, pgoff_t pgofs)
 {
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
-       struct extent_info ei;
+       struct extent_info ei = {};
 
        if (f2fs_lookup_age_extent_cache(inode, pgofs, &ei)) {
                if (!ei.age)
@@ -5138,11 +5137,9 @@ int f2fs_build_segment_manager(struct f2fs_sb_info *sbi)
 
        init_f2fs_rwsem(&sm_info->curseg_lock);
 
-       if (!f2fs_readonly(sbi->sb)) {
-               err = f2fs_create_flush_cmd_control(sbi);
-               if (err)
-                       return err;
-       }
+       err = f2fs_create_flush_cmd_control(sbi);
+       if (err)
+               return err;
 
        err = create_discard_cmd_control(sbi);
        if (err)
index 9c329a365e7502a1243aa013b72dafb120b6749b..3a155c1d810ef814c1e95996b584cbe32cd1d254 100644 (file)
@@ -458,15 +458,16 @@ int hfs_write_inode(struct inode *inode, struct writeback_control *wbc)
                /* panic? */
                return -EIO;
 
+       res = -EIO;
        if (HFS_I(main_inode)->cat_key.CName.len > HFS_NAMELEN)
-               return -EIO;
+               goto out;
        fd.search_key->cat = HFS_I(main_inode)->cat_key;
        if (hfs_brec_find(&fd))
-               /* panic? */
                goto out;
 
        if (S_ISDIR(main_inode->i_mode)) {
-               WARN_ON(fd.entrylength < sizeof(struct hfs_cat_dir));
+               if (fd.entrylength < sizeof(struct hfs_cat_dir))
+                       goto out;
                hfs_bnode_read(fd.bnode, &rec, fd.entryoffset,
                           sizeof(struct hfs_cat_dir));
                if (rec.type != HFS_CDR_DIR ||
@@ -479,6 +480,8 @@ int hfs_write_inode(struct inode *inode, struct writeback_control *wbc)
                hfs_bnode_write(fd.bnode, &rec, fd.entryoffset,
                            sizeof(struct hfs_cat_dir));
        } else if (HFS_IS_RSRC(inode)) {
+               if (fd.entrylength < sizeof(struct hfs_cat_file))
+                       goto out;
                hfs_bnode_read(fd.bnode, &rec, fd.entryoffset,
                               sizeof(struct hfs_cat_file));
                hfs_inode_write_fork(inode, rec.file.RExtRec,
@@ -486,7 +489,8 @@ int hfs_write_inode(struct inode *inode, struct writeback_control *wbc)
                hfs_bnode_write(fd.bnode, &rec, fd.entryoffset,
                                sizeof(struct hfs_cat_file));
        } else {
-               WARN_ON(fd.entrylength < sizeof(struct hfs_cat_file));
+               if (fd.entrylength < sizeof(struct hfs_cat_file))
+                       goto out;
                hfs_bnode_read(fd.bnode, &rec, fd.entryoffset,
                           sizeof(struct hfs_cat_file));
                if (rec.type != HFS_CDR_FIL ||
@@ -503,9 +507,10 @@ int hfs_write_inode(struct inode *inode, struct writeback_control *wbc)
                hfs_bnode_write(fd.bnode, &rec, fd.entryoffset,
                            sizeof(struct hfs_cat_file));
        }
+       res = 0;
 out:
        hfs_find_exit(&fd);
-       return 0;
+       return res;
 }
 
 static struct dentry *hfs_file_lookup(struct inode *dir, struct dentry *dentry,
index 2a39ffb8423b75dfc205215d4df85b09a1859aa4..6e61b5bc7d86ed5add4e940a3972068a508c52a0 100644 (file)
@@ -322,7 +322,8 @@ int ksmbd_decode_ntlmssp_auth_blob(struct authenticate_message *authblob,
        dn_off = le32_to_cpu(authblob->DomainName.BufferOffset);
        dn_len = le16_to_cpu(authblob->DomainName.Length);
 
-       if (blob_len < (u64)dn_off + dn_len || blob_len < (u64)nt_off + nt_len)
+       if (blob_len < (u64)dn_off + dn_len || blob_len < (u64)nt_off + nt_len ||
+           nt_len < CIFS_ENCPWD_SIZE)
                return -EINVAL;
 
        /* TODO : use domain name that imported from configuration file */
index 12be8386446a393aced9947013f410700bacebb0..fd0a288af299e37d0630f24d4192d530cae3197d 100644 (file)
@@ -316,9 +316,12 @@ int ksmbd_conn_handler_loop(void *p)
 
                /* 4 for rfc1002 length field */
                size = pdu_size + 4;
-               conn->request_buf = kvmalloc(size, GFP_KERNEL);
+               conn->request_buf = kvmalloc(size,
+                                            GFP_KERNEL |
+                                            __GFP_NOWARN |
+                                            __GFP_NORETRY);
                if (!conn->request_buf)
-                       continue;
+                       break;
 
                memcpy(conn->request_buf, hdr_buf, sizeof(hdr_buf));
                if (!ksmbd_smb_request(conn))
index 14d7f3599c6397a1dba915509b03a1644cc7ed2a..38fbda52e06fe4128372894e6b1b1e6e34289cad 100644 (file)
@@ -1928,13 +1928,13 @@ int smb2_tree_connect(struct ksmbd_work *work)
        if (conn->posix_ext_supported)
                status.tree_conn->posix_extensions = true;
 
-out_err1:
        rsp->StructureSize = cpu_to_le16(16);
+       inc_rfc1001_len(work->response_buf, 16);
+out_err1:
        rsp->Capabilities = 0;
        rsp->Reserved = 0;
        /* default manual caching */
        rsp->ShareFlags = SMB2_SHAREFLAG_MANUAL_CACHING;
-       inc_rfc1001_len(work->response_buf, 16);
 
        if (!IS_ERR(treename))
                kfree(treename);
@@ -1967,6 +1967,9 @@ out_err1:
                rsp->hdr.Status = STATUS_ACCESS_DENIED;
        }
 
+       if (status.ret != KSMBD_TREE_CONN_STATUS_OK)
+               smb2_set_err_rsp(work);
+
        return rc;
 }
 
index 63d55f543bd2e4f9ee09a35e59a585685431b1ea..4c6bd0b699791321eda9ab9b82d98be3a5aa7ada 100644 (file)
@@ -295,6 +295,7 @@ static int ksmbd_tcp_readv(struct tcp_transport *t, struct kvec *iov_orig,
        struct msghdr ksmbd_msg;
        struct kvec *iov;
        struct ksmbd_conn *conn = KSMBD_TRANS(t)->conn;
+       int max_retry = 2;
 
        iov = get_conn_iovec(t, nr_segs);
        if (!iov)
@@ -321,9 +322,11 @@ static int ksmbd_tcp_readv(struct tcp_transport *t, struct kvec *iov_orig,
                } else if (conn->status == KSMBD_SESS_NEED_RECONNECT) {
                        total_read = -EAGAIN;
                        break;
-               } else if (length == -ERESTARTSYS || length == -EAGAIN) {
+               } else if ((length == -ERESTARTSYS || length == -EAGAIN) &&
+                          max_retry) {
                        usleep_range(1000, 2000);
                        length = 0;
+                       max_retry--;
                        continue;
                } else if (length <= 0) {
                        total_read = -EAGAIN;
index ea1ceffa1d3aa78ee4ccaeb385df5d6c910c6921..f7e4a88d5d92928a60858308cac312f87632d0bd 100644 (file)
@@ -2957,12 +2957,14 @@ static u64 nfs_access_login_time(const struct task_struct *task,
                                 const struct cred *cred)
 {
        const struct task_struct *parent;
+       const struct cred *pcred;
        u64 ret;
 
        rcu_read_lock();
        for (;;) {
                parent = rcu_dereference(task->real_parent);
-               if (parent == task || cred_fscmp(parent->cred, cred) != 0)
+               pcred = rcu_dereference(parent->cred);
+               if (parent == task || cred_fscmp(pcred, cred) != 0)
                        break;
                task = parent;
        }
@@ -3023,6 +3025,7 @@ static int nfs_access_get_cached_rcu(struct inode *inode, const struct cred *cre
         * but do it without locking.
         */
        struct nfs_inode *nfsi = NFS_I(inode);
+       u64 login_time = nfs_access_login_time(current, cred);
        struct nfs_access_entry *cache;
        int err = -ECHILD;
        struct list_head *lh;
@@ -3037,6 +3040,8 @@ static int nfs_access_get_cached_rcu(struct inode *inode, const struct cred *cre
                cache = NULL;
        if (cache == NULL)
                goto out;
+       if ((s64)(login_time - cache->timestamp) > 0)
+               goto out;
        if (nfs_check_cache_invalid(inode, NFS_INO_INVALID_ACCESS))
                goto out;
        *mask = cache->mask;
index ad34a33b0737c7611f791290b69edf9fe722efe3..4974cd18ca468ed8f7301556907c0075e0738fa2 100644 (file)
@@ -783,6 +783,12 @@ filelayout_alloc_lseg(struct pnfs_layout_hdr *layoutid,
        return &fl->generic_hdr;
 }
 
+static bool
+filelayout_lseg_is_striped(const struct nfs4_filelayout_segment *flseg)
+{
+       return flseg->num_fh > 1;
+}
+
 /*
  * filelayout_pg_test(). Called by nfs_can_coalesce_requests()
  *
@@ -803,6 +809,8 @@ filelayout_pg_test(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
        size = pnfs_generic_pg_test(pgio, prev, req);
        if (!size)
                return 0;
+       else if (!filelayout_lseg_is_striped(FILELAYOUT_LSEG(pgio->pg_lseg)))
+               return size;
 
        /* see if req and prev are in the same stripe */
        if (prev) {
index 45b2c9e3f6360038e0d41191f61e8cd7497577df..0ef0703490144b4017583b26d24547c66f3d9480 100644 (file)
@@ -1071,8 +1071,8 @@ nfsd_file_is_cached(struct inode *inode)
 
 static __be32
 nfsd_file_do_acquire(struct svc_rqst *rqstp, struct svc_fh *fhp,
-                    unsigned int may_flags, struct nfsd_file **pnf,
-                    bool open, bool want_gc)
+                    unsigned int may_flags, struct file *file,
+                    struct nfsd_file **pnf, bool want_gc)
 {
        struct nfsd_file_lookup_key key = {
                .type   = NFSD_FILE_KEY_FULL,
@@ -1147,8 +1147,7 @@ wait_for_construction:
        status = nfserrno(nfsd_open_break_lease(file_inode(nf->nf_file), may_flags));
 out:
        if (status == nfs_ok) {
-               if (open)
-                       this_cpu_inc(nfsd_file_acquisitions);
+               this_cpu_inc(nfsd_file_acquisitions);
                *pnf = nf;
        } else {
                if (refcount_dec_and_test(&nf->nf_ref))
@@ -1158,20 +1157,23 @@ out:
 
 out_status:
        put_cred(key.cred);
-       if (open)
-               trace_nfsd_file_acquire(rqstp, key.inode, may_flags, nf, status);
+       trace_nfsd_file_acquire(rqstp, key.inode, may_flags, nf, status);
        return status;
 
 open_file:
        trace_nfsd_file_alloc(nf);
        nf->nf_mark = nfsd_file_mark_find_or_create(nf, key.inode);
        if (nf->nf_mark) {
-               if (open) {
+               if (file) {
+                       get_file(file);
+                       nf->nf_file = file;
+                       status = nfs_ok;
+                       trace_nfsd_file_opened(nf, status);
+               } else {
                        status = nfsd_open_verified(rqstp, fhp, may_flags,
                                                    &nf->nf_file);
                        trace_nfsd_file_open(nf, status);
-               } else
-                       status = nfs_ok;
+               }
        } else
                status = nfserr_jukebox;
        /*
@@ -1207,7 +1209,7 @@ __be32
 nfsd_file_acquire_gc(struct svc_rqst *rqstp, struct svc_fh *fhp,
                     unsigned int may_flags, struct nfsd_file **pnf)
 {
-       return nfsd_file_do_acquire(rqstp, fhp, may_flags, pnf, true, true);
+       return nfsd_file_do_acquire(rqstp, fhp, may_flags, NULL, pnf, true);
 }
 
 /**
@@ -1228,28 +1230,30 @@ __be32
 nfsd_file_acquire(struct svc_rqst *rqstp, struct svc_fh *fhp,
                  unsigned int may_flags, struct nfsd_file **pnf)
 {
-       return nfsd_file_do_acquire(rqstp, fhp, may_flags, pnf, true, false);
+       return nfsd_file_do_acquire(rqstp, fhp, may_flags, NULL, pnf, false);
 }
 
 /**
- * nfsd_file_create - Get a struct nfsd_file, do not open
+ * nfsd_file_acquire_opened - Get a struct nfsd_file using existing open file
  * @rqstp: the RPC transaction being executed
  * @fhp: the NFS filehandle of the file just created
  * @may_flags: NFSD_MAY_ settings for the file
+ * @file: cached, already-open file (may be NULL)
  * @pnf: OUT: new or found "struct nfsd_file" object
  *
- * The nfsd_file_object returned by this API is reference-counted
- * but not garbage-collected. The object is released immediately
- * one RCU grace period after the final nfsd_file_put().
+ * Acquire a nfsd_file object that is not GC'ed. If one doesn't already exist,
+ * and @file is non-NULL, use it to instantiate a new nfsd_file instead of
+ * opening a new one.
  *
  * Returns nfs_ok and sets @pnf on success; otherwise an nfsstat in
  * network byte order is returned.
  */
 __be32
-nfsd_file_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
-                unsigned int may_flags, struct nfsd_file **pnf)
+nfsd_file_acquire_opened(struct svc_rqst *rqstp, struct svc_fh *fhp,
+                        unsigned int may_flags, struct file *file,
+                        struct nfsd_file **pnf)
 {
-       return nfsd_file_do_acquire(rqstp, fhp, may_flags, pnf, false, false);
+       return nfsd_file_do_acquire(rqstp, fhp, may_flags, file, pnf, false);
 }
 
 /*
index b7efb2c3ddb18527d503322a792fc21dd8f8be7e..41516a4263ea519c8af05e85d82f8ca87a248e4f 100644 (file)
@@ -60,7 +60,8 @@ __be32 nfsd_file_acquire_gc(struct svc_rqst *rqstp, struct svc_fh *fhp,
                  unsigned int may_flags, struct nfsd_file **nfp);
 __be32 nfsd_file_acquire(struct svc_rqst *rqstp, struct svc_fh *fhp,
                  unsigned int may_flags, struct nfsd_file **nfp);
-__be32 nfsd_file_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
-                 unsigned int may_flags, struct nfsd_file **nfp);
+__be32 nfsd_file_acquire_opened(struct svc_rqst *rqstp, struct svc_fh *fhp,
+                 unsigned int may_flags, struct file *file,
+                 struct nfsd_file **nfp);
 int nfsd_file_cache_stats_show(struct seq_file *m, void *v);
 #endif /* _FS_NFSD_FILECACHE_H */
index 8c854ba3285bbcddfe62010dc6b93018fdc3927c..51a4b7885cae2a2a5f5cab4932ebcd6306d10348 100644 (file)
@@ -195,7 +195,7 @@ struct nfsd_net {
 
        atomic_t                nfsd_courtesy_clients;
        struct shrinker         nfsd_client_shrinker;
-       struct delayed_work     nfsd_shrinker_work;
+       struct work_struct      nfsd_shrinker_work;
 };
 
 /* Simple check to find out if a given net was properly initialized */
index bd880d55f565b0eb8a97b63bcceab144e4a59ecf..f189ba7995f5ae4ec7424e1660822f8e41191dc0 100644 (file)
@@ -937,7 +937,7 @@ nfsd4_read(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
         * the client wants us to do more in this compound:
         */
        if (!nfsd4_last_compound_op(rqstp))
-               __clear_bit(RQ_SPLICE_OK, &rqstp->rq_flags);
+               clear_bit(RQ_SPLICE_OK, &rqstp->rq_flags);
 
        /* check stateid */
        status = nfs4_preprocess_stateid_op(rqstp, cstate, &cstate->current_fh,
@@ -1318,6 +1318,7 @@ try_again:
                        /* allow 20secs for mount/unmount for now - revisit */
                        if (signal_pending(current) ||
                                        (schedule_timeout(20*HZ) == 0)) {
+                               finish_wait(&nn->nfsd_ssc_waitq, &wait);
                                kfree(work);
                                return nfserr_eagain;
                        }
@@ -2607,12 +2608,11 @@ nfsd4_proc_compound(struct svc_rqst *rqstp)
        cstate->minorversion = args->minorversion;
        fh_init(current_fh, NFS4_FHSIZE);
        fh_init(save_fh, NFS4_FHSIZE);
-
        /*
         * Don't use the deferral mechanism for NFSv4; compounds make it
         * too hard to avoid non-idempotency problems.
         */
-       __clear_bit(RQ_USEDEFERRAL, &rqstp->rq_flags);
+       clear_bit(RQ_USEDEFERRAL, &rqstp->rq_flags);
 
        /*
         * According to RFC3010, this takes precedence over all other errors.
@@ -2734,7 +2734,7 @@ encode_op:
 out:
        cstate->status = status;
        /* Reset deferral mechanism for RPC deferrals */
-       __set_bit(RQ_USEDEFERRAL, &rqstp->rq_flags);
+       set_bit(RQ_USEDEFERRAL, &rqstp->rq_flags);
        return rpc_success;
 }
 
index 7b2ee535ade83235e0acaf5b4c6a9d1e75ecdd56..4ef529379065a10d6957680f1ab50e9392215cc5 100644 (file)
@@ -4411,7 +4411,7 @@ nfsd4_state_shrinker_count(struct shrinker *shrink, struct shrink_control *sc)
        if (!count)
                count = atomic_long_read(&num_delegations);
        if (count)
-               mod_delayed_work(laundry_wq, &nn->nfsd_shrinker_work, 0);
+               queue_work(laundry_wq, &nn->nfsd_shrinker_work);
        return (unsigned long)count;
 }
 
@@ -4421,7 +4421,7 @@ nfsd4_state_shrinker_scan(struct shrinker *shrink, struct shrink_control *sc)
        return SHRINK_STOP;
 }
 
-int
+void
 nfsd4_init_leases_net(struct nfsd_net *nn)
 {
        struct sysinfo si;
@@ -4443,16 +4443,6 @@ nfsd4_init_leases_net(struct nfsd_net *nn)
        nn->nfs4_max_clients = max_t(int, max_clients, NFS4_CLIENTS_PER_GB);
 
        atomic_set(&nn->nfsd_courtesy_clients, 0);
-       nn->nfsd_client_shrinker.scan_objects = nfsd4_state_shrinker_scan;
-       nn->nfsd_client_shrinker.count_objects = nfsd4_state_shrinker_count;
-       nn->nfsd_client_shrinker.seeks = DEFAULT_SEEKS;
-       return register_shrinker(&nn->nfsd_client_shrinker, "nfsd-client");
-}
-
-void
-nfsd4_leases_net_shutdown(struct nfsd_net *nn)
-{
-       unregister_shrinker(&nn->nfsd_client_shrinker);
 }
 
 static void init_nfs4_replay(struct nfs4_replay *rp)
@@ -5262,18 +5252,10 @@ static __be32 nfs4_get_vfs_file(struct svc_rqst *rqstp, struct nfs4_file *fp,
        if (!fp->fi_fds[oflag]) {
                spin_unlock(&fp->fi_lock);
 
-               if (!open->op_filp) {
-                       status = nfsd_file_acquire(rqstp, cur_fh, access, &nf);
-                       if (status != nfs_ok)
-                               goto out_put_access;
-               } else {
-                       status = nfsd_file_create(rqstp, cur_fh, access, &nf);
-                       if (status != nfs_ok)
-                               goto out_put_access;
-                       nf->nf_file = open->op_filp;
-                       open->op_filp = NULL;
-                       trace_nfsd_file_create(rqstp, access, nf);
-               }
+               status = nfsd_file_acquire_opened(rqstp, cur_fh, access,
+                                                 open->op_filp, &nf);
+               if (status != nfs_ok)
+                       goto out_put_access;
 
                spin_lock(&fp->fi_lock);
                if (!fp->fi_fds[oflag]) {
@@ -6243,8 +6225,7 @@ deleg_reaper(struct nfsd_net *nn)
 static void
 nfsd4_state_shrinker_worker(struct work_struct *work)
 {
-       struct delayed_work *dwork = to_delayed_work(work);
-       struct nfsd_net *nn = container_of(dwork, struct nfsd_net,
+       struct nfsd_net *nn = container_of(work, struct nfsd_net,
                                nfsd_shrinker_work);
 
        courtesy_client_reaper(nn);
@@ -8074,11 +8055,20 @@ static int nfs4_state_create_net(struct net *net)
        INIT_LIST_HEAD(&nn->blocked_locks_lru);
 
        INIT_DELAYED_WORK(&nn->laundromat_work, laundromat_main);
-       INIT_DELAYED_WORK(&nn->nfsd_shrinker_work, nfsd4_state_shrinker_worker);
+       INIT_WORK(&nn->nfsd_shrinker_work, nfsd4_state_shrinker_worker);
        get_net(net);
 
+       nn->nfsd_client_shrinker.scan_objects = nfsd4_state_shrinker_scan;
+       nn->nfsd_client_shrinker.count_objects = nfsd4_state_shrinker_count;
+       nn->nfsd_client_shrinker.seeks = DEFAULT_SEEKS;
+
+       if (register_shrinker(&nn->nfsd_client_shrinker, "nfsd-client"))
+               goto err_shrinker;
        return 0;
 
+err_shrinker:
+       put_net(net);
+       kfree(nn->sessionid_hashtbl);
 err_sessionid:
        kfree(nn->unconf_id_hashtbl);
 err_unconf_id:
@@ -8171,6 +8161,8 @@ nfs4_state_shutdown_net(struct net *net)
        struct list_head *pos, *next, reaplist;
        struct nfsd_net *nn = net_generic(net, nfsd_net_id);
 
+       unregister_shrinker(&nn->nfsd_client_shrinker);
+       cancel_work(&nn->nfsd_shrinker_work);
        cancel_delayed_work_sync(&nn->laundromat_work);
        locks_end_grace(&nn->nfsd4_manager);
 
index 2b4ae858c89bea21798cbb8e2b5125523354c01d..97edb32be77f187832c479a76ea3a29ffd7c80a6 100644 (file)
@@ -2523,7 +2523,7 @@ nfsd4_decode_compound(struct nfsd4_compoundargs *argp)
        argp->rqstp->rq_cachetype = cachethis ? RC_REPLBUFF : RC_NOCACHE;
 
        if (readcount > 1 || max_reply > PAGE_SIZE - auth_slack)
-               __clear_bit(RQ_SPLICE_OK, &argp->rqstp->rq_flags);
+               clear_bit(RQ_SPLICE_OK, &argp->rqstp->rq_flags);
 
        return true;
 }
@@ -3629,6 +3629,17 @@ nfsd4_encode_dirent(void *ccdv, const char *name, int namlen,
        case nfserr_noent:
                xdr_truncate_encode(xdr, start_offset);
                goto skip_entry;
+       case nfserr_jukebox:
+               /*
+                * The pseudoroot should only display dentries that lead to
+                * exports. If we get EJUKEBOX here, then we can't tell whether
+                * this entry should be included. Just fail the whole READDIR
+                * with NFS4ERR_DELAY in that case, and hope that the situation
+                * will resolve itself by the client's next attempt.
+                */
+               if (cd->rd_fhp->fh_export->ex_flags & NFSEXP_V4ROOT)
+                       goto fail;
+               fallthrough;
        default:
                /*
                 * If the client requested the RDATTR_ERROR attribute,
index d1e581a60480c04d28d61a6fc5ca60ccfb2d8f70..c2577ee7ffb2208e3f32282de7bd912ac47ca0d7 100644 (file)
@@ -1457,9 +1457,7 @@ static __net_init int nfsd_init_net(struct net *net)
                goto out_idmap_error;
        nn->nfsd_versions = NULL;
        nn->nfsd4_minorversions = NULL;
-       retval = nfsd4_init_leases_net(nn);
-       if (retval)
-               goto out_drc_error;
+       nfsd4_init_leases_net(nn);
        retval = nfsd_reply_cache_init(nn);
        if (retval)
                goto out_cache_error;
@@ -1469,8 +1467,6 @@ static __net_init int nfsd_init_net(struct net *net)
        return 0;
 
 out_cache_error:
-       nfsd4_leases_net_shutdown(nn);
-out_drc_error:
        nfsd_idmap_shutdown(net);
 out_idmap_error:
        nfsd_export_shutdown(net);
@@ -1486,7 +1482,6 @@ static __net_exit void nfsd_exit_net(struct net *net)
        nfsd_idmap_shutdown(net);
        nfsd_export_shutdown(net);
        nfsd_netns_free_versions(net_generic(net, nfsd_net_id));
-       nfsd4_leases_net_shutdown(nn);
 }
 
 static struct pernet_operations nfsd_net_ops = {
index 93b42ef9ed91b63a1636b5d2d953169827bb6746..fa0144a742678fd46119dc6c871dba69cd383257 100644 (file)
@@ -504,8 +504,7 @@ extern void unregister_cld_notifier(void);
 extern void nfsd4_ssc_init_umount_work(struct nfsd_net *nn);
 #endif
 
-extern int nfsd4_init_leases_net(struct nfsd_net *nn);
-extern void nfsd4_leases_net_shutdown(struct nfsd_net *nn);
+extern void nfsd4_init_leases_net(struct nfsd_net *nn);
 
 #else /* CONFIG_NFSD_V4 */
 static inline int nfsd4_is_junction(struct dentry *dentry)
@@ -513,8 +512,7 @@ static inline int nfsd4_is_junction(struct dentry *dentry)
        return 0;
 }
 
-static inline int nfsd4_init_leases_net(struct nfsd_net *nn) { return 0; };
-static inline void nfsd4_leases_net_shutdown(struct nfsd_net *nn) {};
+static inline void nfsd4_init_leases_net(struct nfsd_net *nn) { };
 
 #define register_cld_notifier() 0
 #define unregister_cld_notifier() do { } while(0)
index a5570cf75f3fd550a38b6b82fffdba4039fa9503..9744443c396522af6bf9ae34eca57c60a133cc8f 100644 (file)
@@ -211,7 +211,7 @@ nfsd_proc_read(struct svc_rqst *rqstp)
        if (resp->status == nfs_ok)
                resp->status = fh_getattr(&resp->fh, &resp->stat);
        else if (resp->status == nfserr_jukebox)
-               __set_bit(RQ_DROPME, &rqstp->rq_flags);
+               set_bit(RQ_DROPME, &rqstp->rq_flags);
        return rpc_success;
 }
 
@@ -246,7 +246,7 @@ nfsd_proc_write(struct svc_rqst *rqstp)
        if (resp->status == nfs_ok)
                resp->status = fh_getattr(&resp->fh, &resp->stat);
        else if (resp->status == nfserr_jukebox)
-               __set_bit(RQ_DROPME, &rqstp->rq_flags);
+               set_bit(RQ_DROPME, &rqstp->rq_flags);
        return rpc_success;
 }
 
index 56fba1cba3af7eb574b8cf8ee6c1822994eef863..325d3d3f121109abfbdccabf05ab9d383f47adc1 100644 (file)
@@ -453,8 +453,8 @@ static void nfsd_shutdown_net(struct net *net)
 {
        struct nfsd_net *nn = net_generic(net, nfsd_net_id);
 
-       nfsd_file_cache_shutdown_net(net);
        nfs4_state_shutdown_net(net);
+       nfsd_file_cache_shutdown_net(net);
        if (nn->lockd_up) {
                lockd_down(net);
                nn->lockd_up = false;
index c852ae8eaf3714bcdc084492c1f7208928f388bb..8f9c82d9e075b286ac875ae36fb26d3d249174a4 100644 (file)
@@ -981,43 +981,6 @@ TRACE_EVENT(nfsd_file_acquire,
        )
 );
 
-TRACE_EVENT(nfsd_file_create,
-       TP_PROTO(
-               const struct svc_rqst *rqstp,
-               unsigned int may_flags,
-               const struct nfsd_file *nf
-       ),
-
-       TP_ARGS(rqstp, may_flags, nf),
-
-       TP_STRUCT__entry(
-               __field(const void *, nf_inode)
-               __field(const void *, nf_file)
-               __field(unsigned long, may_flags)
-               __field(unsigned long, nf_flags)
-               __field(unsigned long, nf_may)
-               __field(unsigned int, nf_ref)
-               __field(u32, xid)
-       ),
-
-       TP_fast_assign(
-               __entry->nf_inode = nf->nf_inode;
-               __entry->nf_file = nf->nf_file;
-               __entry->may_flags = may_flags;
-               __entry->nf_flags = nf->nf_flags;
-               __entry->nf_may = nf->nf_may;
-               __entry->nf_ref = refcount_read(&nf->nf_ref);
-               __entry->xid = be32_to_cpu(rqstp->rq_xid);
-       ),
-
-       TP_printk("xid=0x%x inode=%p may_flags=%s ref=%u nf_flags=%s nf_may=%s nf_file=%p",
-               __entry->xid, __entry->nf_inode,
-               show_nfsd_may_flags(__entry->may_flags),
-               __entry->nf_ref, show_nf_flags(__entry->nf_flags),
-               show_nfsd_may_flags(__entry->nf_may), __entry->nf_file
-       )
-);
-
 TRACE_EVENT(nfsd_file_insert_err,
        TP_PROTO(
                const struct svc_rqst *rqstp,
@@ -1079,8 +1042,8 @@ TRACE_EVENT(nfsd_file_cons_err,
        )
 );
 
-TRACE_EVENT(nfsd_file_open,
-       TP_PROTO(struct nfsd_file *nf, __be32 status),
+DECLARE_EVENT_CLASS(nfsd_file_open_class,
+       TP_PROTO(const struct nfsd_file *nf, __be32 status),
        TP_ARGS(nf, status),
        TP_STRUCT__entry(
                __field(void *, nf_inode)       /* cannot be dereferenced */
@@ -1104,6 +1067,17 @@ TRACE_EVENT(nfsd_file_open,
                __entry->nf_file)
 )
 
+#define DEFINE_NFSD_FILE_OPEN_EVENT(name)                                      \
+DEFINE_EVENT(nfsd_file_open_class, name,                                       \
+       TP_PROTO(                                                       \
+               const struct nfsd_file *nf,                             \
+               __be32 status                                           \
+       ),                                                              \
+       TP_ARGS(nf, status))
+
+DEFINE_NFSD_FILE_OPEN_EVENT(nfsd_file_open);
+DEFINE_NFSD_FILE_OPEN_EVENT(nfsd_file_opened);
+
 TRACE_EVENT(nfsd_file_is_cached,
        TP_PROTO(
                const struct inode *inode,
index b9d15c3df3cc193ba10cf88761d45027ee6de346..40ce92a332fe7a6553eeb78acf0414a32defac78 100644 (file)
@@ -480,9 +480,18 @@ static int __nilfs_btree_get_block(const struct nilfs_bmap *btree, __u64 ptr,
        ret = nilfs_btnode_submit_block(btnc, ptr, 0, REQ_OP_READ, &bh,
                                        &submit_ptr);
        if (ret) {
-               if (ret != -EEXIST)
-                       return ret;
-               goto out_check;
+               if (likely(ret == -EEXIST))
+                       goto out_check;
+               if (ret == -ENOENT) {
+                       /*
+                        * Block address translation failed due to invalid
+                        * value of 'ptr'.  In this case, return internal code
+                        * -EINVAL (broken bmap) to notify bmap layer of fatal
+                        * metadata corruption.
+                        */
+                       ret = -EINVAL;
+               }
+               return ret;
        }
 
        if (ra) {
index e5399ebc3a2b2d856bf5b81b392f97af2cd79814..d294cd9756887e13358e4fca461e48d06447f46e 100644 (file)
@@ -390,10 +390,10 @@ static int ntfs_truncate(struct inode *inode, loff_t new_size)
 
        new_valid = ntfs_up_block(sb, min_t(u64, ni->i_valid, new_size));
 
-       ni_lock(ni);
-
        truncate_setsize(inode, new_size);
 
+       ni_lock(ni);
+
        down_write(&ni->file.run_lock);
        err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run, new_size,
                            &new_valid, ni->mi.sbi->options->prealloc, NULL);
index 1d7c2a812fc19809630b70c2969021740bf12fee..34e416327dd4ee2e4f3e399a0b50eabdd7ff44e1 100644 (file)
@@ -595,7 +595,7 @@ static void udf_do_extend_final_block(struct inode *inode,
         */
        if (new_elen <= (last_ext->extLength & UDF_EXTENT_LENGTH_MASK))
                return;
-       added_bytes = (last_ext->extLength & UDF_EXTENT_LENGTH_MASK) - new_elen;
+       added_bytes = new_elen - (last_ext->extLength & UDF_EXTENT_LENGTH_MASK);
        last_ext->extLength += added_bytes;
        UDF_I(inode)->i_lenExtents += added_bytes;
 
@@ -684,7 +684,7 @@ static sector_t inode_getblk(struct inode *inode, sector_t block,
        struct kernel_lb_addr eloc, tmpeloc;
        int c = 1;
        loff_t lbcount = 0, b_off = 0;
-       udf_pblk_t newblocknum, newblock;
+       udf_pblk_t newblocknum, newblock = 0;
        sector_t offset = 0;
        int8_t etype;
        struct udf_inode_info *iinfo = UDF_I(inode);
@@ -787,7 +787,6 @@ static sector_t inode_getblk(struct inode *inode, sector_t block,
                ret = udf_do_extend_file(inode, &prev_epos, laarr, hole_len);
                if (ret < 0) {
                        *err = ret;
-                       newblock = 0;
                        goto out_free;
                }
                c = 0;
@@ -852,7 +851,6 @@ static sector_t inode_getblk(struct inode *inode, sector_t block,
                                goal, err);
                if (!newblocknum) {
                        *err = -ENOSPC;
-                       newblock = 0;
                        goto out_free;
                }
                if (isBeyondEOF)
index 98ac37e34e3d4bd4ac229e01a42b2e2a8a27685d..cc694846617a52bc874b86a85377b7a97811cc49 100644 (file)
@@ -108,6 +108,21 @@ static bool userfaultfd_is_initialized(struct userfaultfd_ctx *ctx)
        return ctx->features & UFFD_FEATURE_INITIALIZED;
 }
 
+static void userfaultfd_set_vm_flags(struct vm_area_struct *vma,
+                                    vm_flags_t flags)
+{
+       const bool uffd_wp_changed = (vma->vm_flags ^ flags) & VM_UFFD_WP;
+
+       vma->vm_flags = flags;
+       /*
+        * For shared mappings, we want to enable writenotify while
+        * userfaultfd-wp is enabled (see vma_wants_writenotify()). We'll simply
+        * recalculate vma->vm_page_prot whenever userfaultfd-wp changes.
+        */
+       if ((vma->vm_flags & VM_SHARED) && uffd_wp_changed)
+               vma_set_page_prot(vma);
+}
+
 static int userfaultfd_wake_function(wait_queue_entry_t *wq, unsigned mode,
                                     int wake_flags, void *key)
 {
@@ -618,7 +633,8 @@ static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
                for_each_vma(vmi, vma) {
                        if (vma->vm_userfaultfd_ctx.ctx == release_new_ctx) {
                                vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
-                               vma->vm_flags &= ~__VM_UFFD_FLAGS;
+                               userfaultfd_set_vm_flags(vma,
+                                                        vma->vm_flags & ~__VM_UFFD_FLAGS);
                        }
                }
                mmap_write_unlock(mm);
@@ -652,7 +668,7 @@ int dup_userfaultfd(struct vm_area_struct *vma, struct list_head *fcs)
        octx = vma->vm_userfaultfd_ctx.ctx;
        if (!octx || !(octx->features & UFFD_FEATURE_EVENT_FORK)) {
                vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
-               vma->vm_flags &= ~__VM_UFFD_FLAGS;
+               userfaultfd_set_vm_flags(vma, vma->vm_flags & ~__VM_UFFD_FLAGS);
                return 0;
        }
 
@@ -733,7 +749,7 @@ void mremap_userfaultfd_prep(struct vm_area_struct *vma,
        } else {
                /* Drop uffd context if remap feature not enabled */
                vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
-               vma->vm_flags &= ~__VM_UFFD_FLAGS;
+               userfaultfd_set_vm_flags(vma, vma->vm_flags & ~__VM_UFFD_FLAGS);
        }
 }
 
@@ -895,7 +911,7 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
                        prev = vma;
                }
 
-               vma->vm_flags = new_flags;
+               userfaultfd_set_vm_flags(vma, new_flags);
                vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
        }
        mmap_write_unlock(mm);
@@ -1463,7 +1479,7 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx,
                 * the next vma was merged into the current one and
                 * the current one has not been updated yet.
                 */
-               vma->vm_flags = new_flags;
+               userfaultfd_set_vm_flags(vma, new_flags);
                vma->vm_userfaultfd_ctx.ctx = ctx;
 
                if (is_vm_hugetlb_page(vma) && uffd_disable_huge_pmd_share(vma))
@@ -1651,7 +1667,7 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
                 * the next vma was merged into the current one and
                 * the current one has not been updated yet.
                 */
-               vma->vm_flags = new_flags;
+               userfaultfd_set_vm_flags(vma, new_flags);
                vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
 
        skip:
index 4c16c8c31fcbcdb117ad0c31eb4e2bcb149c9a6c..35f574421670da7a359ed5ae5bd1fbaa7f8dac71 100644 (file)
@@ -4666,7 +4666,12 @@ xfs_btree_space_to_height(
        const unsigned int      *limits,
        unsigned long long      leaf_blocks)
 {
-       unsigned long long      node_blocks = limits[1];
+       /*
+        * The root btree block can have fewer than minrecs pointers in it
+        * because the tree might not be big enough to require that amount of
+        * fanout. Hence it has a minimum size of 2 pointers, not limits[1].
+        */
+       unsigned long long      node_blocks = 2;
        unsigned long long      blocks_left = leaf_blocks - 1;
        unsigned int            height = 1;
 
index ad22a003f9595cbc3af195ba4d46f1feb3a122fb..f3d328e4a4408ba3866fa0910d35693e2af934ab 100644 (file)
@@ -236,6 +236,7 @@ xfs_extent_busy_update_extent(
                 *
                 */
                busyp->bno = fend;
+               busyp->length = bend - fend;
        } else if (bbno < fbno) {
                /*
                 * Case 8:
index f35e2cee5265589d83f3fc080092db064b051091..ddeaccc04aec94a3362efffb617121ca6157e0ba 100644 (file)
@@ -1853,12 +1853,20 @@ xfs_inodegc_worker(
                                                struct xfs_inodegc, work);
        struct llist_node       *node = llist_del_all(&gc->list);
        struct xfs_inode        *ip, *n;
+       unsigned int            nofs_flag;
 
        WRITE_ONCE(gc->items, 0);
 
        if (!node)
                return;
 
+       /*
+        * We can allocate memory here while doing writeback on behalf of
+        * memory reclaim.  To avoid memory allocation deadlocks set the
+        * task-wide nofs context for the following operations.
+        */
+       nofs_flag = memalloc_nofs_save();
+
        ip = llist_entry(node, struct xfs_inode, i_gclist);
        trace_xfs_inodegc_worker(ip->i_mount, READ_ONCE(gc->shrinker_hits));
 
@@ -1867,6 +1875,8 @@ xfs_inodegc_worker(
                xfs_iflags_set(ip, XFS_INACTIVATING);
                xfs_inodegc_inactivate(ip);
        }
+
+       memalloc_nofs_restore(nofs_flag);
 }
 
 /*
index 13f1b2add39041e0a6050e07440fdca207026d58..736510bc241b851d604b23c2874320cb8239c753 100644 (file)
@@ -754,7 +754,7 @@ xfs_bulkstat_fmt(
 static int
 xfs_bulk_ireq_setup(
        struct xfs_mount        *mp,
-       struct xfs_bulk_ireq    *hdr,
+       const struct xfs_bulk_ireq *hdr,
        struct xfs_ibulk        *breq,
        void __user             *ubuffer)
 {
@@ -780,7 +780,7 @@ xfs_bulk_ireq_setup(
 
                switch (hdr->ino) {
                case XFS_BULK_IREQ_SPECIAL_ROOT:
-                       hdr->ino = mp->m_sb.sb_rootino;
+                       breq->startino = mp->m_sb.sb_rootino;
                        break;
                default:
                        return -EINVAL;
index 669c1bc5c3a777d288008f3f90b0025b3ced7505..fc1946f80a4aac20dcd9755931206e0683290c7c 100644 (file)
@@ -83,7 +83,7 @@ xfs_iomap_valid(
        return true;
 }
 
-const struct iomap_page_ops xfs_iomap_page_ops = {
+static const struct iomap_page_ops xfs_iomap_page_ops = {
        .iomap_valid            = xfs_iomap_valid,
 };
 
index ff53d40a2dae3ad5443980aef6922dfbfa20be6b..e2c542f6dcd4d8c6ad3492a1f420515c68156db9 100644 (file)
@@ -68,7 +68,7 @@ restart:
 
        while (1) {
                struct xfs_dquot *batch[XFS_DQ_LOOKUP_BATCH];
-               int             error = 0;
+               int             error;
                int             i;
 
                mutex_lock(&qi->qi_tree_lock);
index fe46bce8cae6382442244183a29c5ec506fe35c4..5535778a98f925d0ce158cc64f09c3981fe3670e 100644 (file)
@@ -416,8 +416,6 @@ xfs_reflink_fill_cow_hole(
                goto convert;
        }
 
-       ASSERT(cmap->br_startoff > imap->br_startoff);
-
        /* Allocate the entire reservation as unwritten blocks. */
        nimaps = 1;
        error = xfs_bmapi_write(tp, ip, imap->br_startoff, imap->br_blockcount,
index 2c53fbb8d918e5ca63ab3a620455b1f173883251..a9c5c3f720adf86f2afd73a73aec327c2f83399b 100644 (file)
@@ -442,6 +442,10 @@ static int zonefs_io_error_cb(struct blk_zone *zone, unsigned int idx,
                        data_size = zonefs_check_zone_condition(inode, zone,
                                                                false, false);
                }
+       } else if (sbi->s_mount_opts & ZONEFS_MNTOPT_ERRORS_RO &&
+                  data_size > isize) {
+               /* Do not expose garbage data */
+               data_size = isize;
        }
 
        /*
@@ -805,6 +809,24 @@ static ssize_t zonefs_file_dio_append(struct kiocb *iocb, struct iov_iter *from)
 
        ret = submit_bio_wait(bio);
 
+       /*
+        * If the file zone was written underneath the file system, the zone
+        * write pointer may not be where we expect it to be, but the zone
+        * append write can still succeed. So check manually that we wrote where
+        * we intended to, that is, at zi->i_wpoffset.
+        */
+       if (!ret) {
+               sector_t wpsector =
+                       zi->i_zsector + (zi->i_wpoffset >> SECTOR_SHIFT);
+
+               if (bio->bi_iter.bi_sector != wpsector) {
+                       zonefs_warn(inode->i_sb,
+                               "Corrupted write pointer %llu for zone at %llu\n",
+                               wpsector, zi->i_zsector);
+                       ret = -EIO;
+               }
+       }
+
        zonefs_file_write_dio_end_io(iocb, size, ret, 0);
        trace_zonefs_file_dio_append(inode, size, ret);
 
index cd3b75e08ec3fa67d4b4bff40269ca49fa23a20f..e44be31115a6746f079b6a5cc86bbdc7e01cb659 100644 (file)
@@ -230,7 +230,8 @@ struct acpi_pnp_type {
        u32 hardware_id:1;
        u32 bus_address:1;
        u32 platform_id:1;
-       u32 reserved:29;
+       u32 backlight:1;
+       u32 reserved:28;
 };
 
 struct acpi_device_pnp {
index a275c35e5249da96797e02b45c4a57048d0b9cda..8ed9bec03e5341ce3669a59ce8f882b1a42cf21e 100644 (file)
@@ -53,6 +53,7 @@ enum acpi_backlight_type {
 };
 
 #if IS_ENABLED(CONFIG_ACPI_VIDEO)
+extern void acpi_video_report_nolcd(void);
 extern int acpi_video_register(void);
 extern void acpi_video_unregister(void);
 extern void acpi_video_register_backlight(void);
@@ -69,6 +70,7 @@ extern int acpi_video_get_levels(struct acpi_device *device,
                                 struct acpi_video_device_brightness **dev_br,
                                 int *pmax_level);
 #else
+static inline void acpi_video_report_nolcd(void) { return; };
 static inline int acpi_video_register(void) { return -ENODEV; }
 static inline void acpi_video_unregister(void) { return; }
 static inline void acpi_video_register_backlight(void) { return; }
index a94219e9916f185acef402efca6538503a1c7680..659bf3b31c9111053494da883253dff5ac9021a7 100644 (file)
 #define PRINTK_INDEX
 #endif
 
+/*
+ * Discard .note.GNU-stack, which is emitted as PROGBITS by the compiler.
+ * Otherwise, the type of .notes section would become PROGBITS instead of NOTES.
+ */
 #define NOTES                                                          \
+       /DISCARD/ : { *(.note.GNU-stack) }                              \
        .notes : AT(ADDR(.notes) - LOAD_OFFSET) {                       \
                BOUNDED_SECTION_BY(.note.*, _notes)                     \
        } NOTES_HEADERS                                                 \
index ff83d26216876af7368e9ceb996e77ddc6ee74da..3a574e8cd22f439cec7a3a3f164df39d67d23c37 100644 (file)
@@ -26,6 +26,7 @@
 
 #include <linux/types.h>
 
+struct drm_atomic_state;
 struct drm_crtc;
 struct drm_framebuffer;
 struct drm_modeset_acquire_ctx;
index 22078a28d7cb1316ddb800bed8c5c28242c8d1ba..c1da63f6c80800f6369d8fc972551a27a2ff3b58 100644 (file)
@@ -475,6 +475,8 @@ void __bio_release_pages(struct bio *bio, bool mark_dirty);
 extern void bio_set_pages_dirty(struct bio *bio);
 extern void bio_check_pages_dirty(struct bio *bio);
 
+extern void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
+                              struct bio *src, struct bvec_iter *src_iter);
 extern void bio_copy_data(struct bio *dst, struct bio *src);
 extern void bio_free_pages(struct bio *bio);
 void guard_bio_eod(struct bio *bio);
index 301cf1cf4f2facbe4ad3e44c6a166761437c25bd..43d4e073b1115e4628a001081fbf08b296d342df 100644 (file)
@@ -1395,6 +1395,7 @@ struct block_device_operations {
        void (*swap_slot_free_notify) (struct block_device *, unsigned long);
        int (*report_zones)(struct gendisk *, sector_t sector,
                        unsigned int nr_zones, report_zones_cb cb, void *data);
+       char *(*devnode)(struct gendisk *disk, umode_t *mode);
        /* returns the length of the identifier or a negative errno: */
        int (*get_unique_id)(struct gendisk *disk, u8 id[16],
                        enum blk_unique_id id_type);
index 3de24cfb7a3dedfa602e59b84cfb15230ae77ce8..634d37a599fa71984172f22958cdfa37cc2a553b 100644 (file)
@@ -1832,7 +1832,7 @@ void bpf_prog_inc(struct bpf_prog *prog);
 struct bpf_prog * __must_check bpf_prog_inc_not_zero(struct bpf_prog *prog);
 void bpf_prog_put(struct bpf_prog *prog);
 
-void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock);
+void bpf_prog_free_id(struct bpf_prog *prog);
 void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock);
 
 struct btf_field *btf_record_find(const struct btf_record *rec,
index b1b5720d89a596d12eb5cbe9591d0e9e3c5bf918..ee657452f122a867a011980567c08db7b97d7e4c 100644 (file)
@@ -45,8 +45,8 @@ struct sk_buff;
                                        QCA_HDR_MGMT_COMMAND_LEN + \
                                        QCA_HDR_MGMT_DATA1_LEN)
 
-#define QCA_HDR_MGMT_DATA2_LEN         12 /* Other 12 byte for the mdio data */
-#define QCA_HDR_MGMT_PADDING_LEN       34 /* Padding to reach the min Ethernet packet */
+#define QCA_HDR_MGMT_DATA2_LEN         28 /* Other 28 bytes for the mdio data */
+#define QCA_HDR_MGMT_PADDING_LEN       18 /* Padding to reach the min Ethernet packet */
 
 #define QCA_HDR_MGMT_PKT_LEN           (QCA_HDR_MGMT_HEADER_LEN + \
                                        QCA_HDR_LEN + \
index 9ec81290e3c893789cd60b64bdb44b17df00eec9..bd5560542c799e1bc9ec31a8651876fb1a8b4351 100644 (file)
@@ -105,14 +105,14 @@ int elf_core_copy_task_fpregs(struct task_struct *t, elf_fpregset_t *fpu);
  * Dumping its extra ELF program headers includes all the other information
  * a debugger needs to easily find how the gate DSO was being used.
  */
-extern Elf_Half elf_core_extra_phdrs(void);
+extern Elf_Half elf_core_extra_phdrs(struct coredump_params *cprm);
 extern int
 elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset);
 extern int
 elf_core_write_extra_data(struct coredump_params *cprm);
-extern size_t elf_core_extra_data_size(void);
+extern size_t elf_core_extra_data_size(struct coredump_params *cprm);
 #else
-static inline Elf_Half elf_core_extra_phdrs(void)
+static inline Elf_Half elf_core_extra_phdrs(struct coredump_params *cprm)
 {
        return 0;
 }
@@ -127,7 +127,7 @@ static inline int elf_core_write_extra_data(struct coredump_params *cprm)
        return 1;
 }
 
-static inline size_t elf_core_extra_data_size(void)
+static inline size_t elf_core_extra_data_size(struct coredump_params *cprm)
 {
        return 0;
 }
index b986e267d149bed6c16fc9b1ceec49b839f8f32c..b09f443d3ab9dfeef354ec3411b70f67606fe3cd 100644 (file)
@@ -545,8 +545,8 @@ int zynqmp_pm_request_wake(const u32 node,
                           const u64 address,
                           const enum zynqmp_pm_request_ack ack);
 int zynqmp_pm_get_rpu_mode(u32 node_id, enum rpu_oper_mode *rpu_mode);
-int zynqmp_pm_set_rpu_mode(u32 node_id, u32 arg1);
-int zynqmp_pm_set_tcm_config(u32 node_id, u32 arg1);
+int zynqmp_pm_set_rpu_mode(u32 node_id, enum rpu_oper_mode rpu_mode);
+int zynqmp_pm_set_tcm_config(u32 node_id, enum rpu_tcm_comb tcm_mode);
 int zynqmp_pm_set_sd_config(u32 node, enum pm_sd_config_type config, u32 value);
 int zynqmp_pm_set_gem_config(u32 node, enum pm_gem_config_type config,
                             u32 value);
@@ -845,12 +845,12 @@ static inline int zynqmp_pm_get_rpu_mode(u32 node_id, enum rpu_oper_mode *rpu_mo
        return -ENODEV;
 }
 
-static inline int zynqmp_pm_set_rpu_mode(u32 node_id, u32 arg1)
+static inline int zynqmp_pm_set_rpu_mode(u32 node_id, enum rpu_oper_mode rpu_mode)
 {
        return -ENODEV;
 }
 
-static inline int zynqmp_pm_set_tcm_config(u32 node_id, u32 arg1)
+static inline int zynqmp_pm_set_tcm_config(u32 node_id, enum rpu_tcm_comb tcm_mode)
 {
        return -ENODEV;
 }
index 066555ad1bf810b7a57437cc2b3607c9ec5d52f9..c1769a2c5d7082858e3fac64b261b5e5244d9b01 100644 (file)
@@ -1119,6 +1119,9 @@ struct file_lock {
                        int state;              /* state of grant or error if -ve */
                        unsigned int    debug_id;
                } afs;
+               struct {
+                       struct inode *inode;
+               } ceph;
        } fl_u;
 } __randomize_layout;
 
index dcd8a563ab522f2593fd71b8e5dc8bf3dcaea0ab..128a67a40065f25292035455f603075f8504fb8d 100644 (file)
@@ -292,6 +292,8 @@ struct io_ring_ctx {
        struct {
                spinlock_t              completion_lock;
 
+               bool                    poll_multi_queue;
+
                /*
                 * ->iopoll_list is protected by the ctx->uring_lock for
                 * io_uring instances that don't use IORING_SETUP_SQPOLL.
@@ -300,7 +302,6 @@ struct io_ring_ctx {
                 */
                struct io_wq_work_list  iopoll_list;
                struct io_hash_table    cancel_table;
-               bool                    poll_multi_queue;
 
                struct llist_head       work_llist;
 
index 5fe5d198b57ade8b1ec02435f63da2fa0e17aebd..29d4b201c7b26dd8d6a6a698bbc4f9d042ae1206 100644 (file)
@@ -1090,6 +1090,11 @@ enum {
        MLX5_VPORT_ADMIN_STATE_AUTO  = 0x2,
 };
 
+enum {
+       MLX5_VPORT_CVLAN_INSERT_WHEN_NO_CVLAN  = 0x1,
+       MLX5_VPORT_CVLAN_INSERT_ALWAYS         = 0x3,
+};
+
 enum {
        MLX5_L3_PROT_TYPE_IPV4          = 0,
        MLX5_L3_PROT_TYPE_IPV6          = 1,
index d476255c9a3f0d9ea7f0dcd3c8231fbcd9e30d3e..76ef2e4fde38d6da43e62f9c5a064212f8619736 100644 (file)
@@ -315,7 +315,7 @@ struct mlx5_cmd {
        struct mlx5_cmd_debug dbg;
        struct cmd_msg_cache cache[MLX5_NUM_COMMAND_CACHES];
        int checksum_disabled;
-       struct mlx5_cmd_stats *stats;
+       struct mlx5_cmd_stats stats[MLX5_CMD_OP_MAX];
 };
 
 struct mlx5_cmd_mailbox {
index f3d1c62c98ddc4d6d34e7818dd0284d1e4c3a621..a9ee7bc59c90199126d15ed61dbc20248846c0c0 100644 (file)
@@ -913,7 +913,8 @@ struct mlx5_ifc_e_switch_cap_bits {
        u8         vport_svlan_insert[0x1];
        u8         vport_cvlan_insert_if_not_exist[0x1];
        u8         vport_cvlan_insert_overwrite[0x1];
-       u8         reserved_at_5[0x2];
+       u8         reserved_at_5[0x1];
+       u8         vport_cvlan_insert_always[0x1];
        u8         esw_shared_ingress_acl[0x1];
        u8         esw_uplink_ingress_acl[0x1];
        u8         root_ft_on_other_esw[0x1];
index f3f196e4d66d6f42c74366731890a5d4102bf75b..8f857163ac89c4ad28e9b5f79899fdc630a2f529 100644 (file)
@@ -1270,10 +1270,10 @@ static inline void folio_put_refs(struct folio *folio, int refs)
                __folio_put(folio);
 }
 
-/**
- * release_pages - release an array of pages or folios
+/*
+ * union release_pages_arg - an array of pages or folios
  *
- * This just releases a simple array of multiple pages, and
+ * release_pages() releases a simple array of multiple pages, and
  * accepts various different forms of said page array: either
  * a regular old boring array of pages, an array of folios, or
  * an array of encoded page pointers.
index e8ed225d8f7cac74c5e44ea5417fb757eedcd920..ff3f3f23f649892ae81683eb69d861a7a8a28df5 100644 (file)
@@ -413,8 +413,7 @@ static inline void free_anon_vma_name(struct vm_area_struct *vma)
         * Not using anon_vma_name because it generates a warning if mmap_lock
         * is not held, which might be the case here.
         */
-       if (!vma->vm_file)
-               anon_vma_name_put(vma->anon_name);
+       anon_vma_name_put(vma->anon_name);
 }
 
 static inline bool anon_vma_name_eq(struct anon_vma_name *anon_name1,
index 3b8475007734dcaf227e35cad41bd77ada806a49..9757067c305374787f9afbba30b77449b52b305e 100644 (file)
@@ -581,7 +581,7 @@ struct vm_area_struct {
        /*
         * For private and shared anonymous mappings, a pointer to a null
         * terminated string containing the name given to the vma, or NULL if
-        * unnamed. Serialized by mmap_sem. Use anon_vma_name to access.
+        * unnamed. Serialized by mmap_lock. Use anon_vma_name to access.
         */
        struct anon_vma_name *anon_name;
 #endif
index 25765556223a1fa173ff03a33619b83f76db3644..a3f8cdca90c8320ae55cdbd30e5e7c38999856c8 100644 (file)
@@ -7,7 +7,6 @@
 #define __LINUX_MTD_SPI_NOR_H
 
 #include <linux/bitops.h>
-#include <linux/mtd/cfi.h>
 #include <linux/mtd/mtd.h>
 #include <linux/spi/spi-mem.h>
 
index ab934ad951a8706a2ee228ab7fb70c35f9260328..e8c350a3ade153d852bec011dbd3c72a352d319d 100644 (file)
@@ -197,7 +197,7 @@ struct ip_set_region {
 };
 
 /* Max range where every element is added/deleted in one step */
-#define IPSET_MAX_RANGE                (1<<20)
+#define IPSET_MAX_RANGE                (1<<14)
 
 /* The max revision number supported by any set type + 1 */
 #define IPSET_REVISION_MAX     9
index d6be2a6861000e970050ee3a72730297fa68f247..4fad4aa245fb0621ee3adbec1793981ae54f1ea7 100644 (file)
@@ -7,6 +7,7 @@
 #ifndef _LINUX_NVME_H
 #define _LINUX_NVME_H
 
+#include <linux/bits.h>
 #include <linux/types.h>
 #include <linux/uuid.h>
 
@@ -639,8 +640,9 @@ enum {
        NVME_CMD_EFFECTS_NCC            = 1 << 2,
        NVME_CMD_EFFECTS_NIC            = 1 << 3,
        NVME_CMD_EFFECTS_CCC            = 1 << 4,
-       NVME_CMD_EFFECTS_CSE_MASK       = 3 << 16,
+       NVME_CMD_EFFECTS_CSE_MASK       = GENMASK(18, 16),
        NVME_CMD_EFFECTS_UUID_SEL       = 1 << 19,
+       NVME_CMD_EFFECTS_SCOPE_MASK     = GENMASK(31, 20),
 };
 
 struct nvme_effects_log {
index 2e677e6ad09fcf72e05c3f86d5048bf43e2f9258..d7c2d33baa7f8e2f0da7f4ae091dc5e92e1b559c 100644 (file)
@@ -301,7 +301,7 @@ static inline bool folio_ref_try_add_rcu(struct folio *folio, int count)
  *
  * You can also use this function if you're holding a lock that prevents
  * pages being frozen & removed; eg the i_pages lock for the page cache
- * or the mmap_sem or page table lock for page tables.  In this case,
+ * or the mmap_lock or page table lock for page tables.  In this case,
  * it will always succeed, and you could have used a plain folio_get(),
  * but it's sometimes more convenient to have a common function called
  * from both locked and RCU-protected contexts.
index 71eeb4e3b1fdeda38c8d3badafdfaaa6229ac768..6378c997ded56c9c4c5999a2c45da2c0600614e9 100644 (file)
@@ -826,10 +826,7 @@ struct phy_driver {
         * whether to advertise lower-speed modes for that interface. It is
         * assumed that if a rate matching mode is supported on an interface,
         * then that interface's rate can be adapted to all slower link speeds
-        * supported by the phy. If iface is %PHY_INTERFACE_MODE_NA, and the phy
-        * supports any kind of rate matching for any interface, then it must
-        * return that rate matching mode (preferring %RATE_MATCH_PAUSE to
-        * %RATE_MATCH_CRS). If the interface is not supported, this should
+        * supported by the phy. If the interface is not supported, this should
         * return %RATE_MATCH_NONE.
         */
        int (*get_rate_matching)(struct phy_device *phydev,
diff --git a/include/linux/pktcdvd.h b/include/linux/pktcdvd.h
new file mode 100644 (file)
index 0000000..f9c5ac8
--- /dev/null
@@ -0,0 +1,197 @@
+/*
+ * Copyright (C) 2000 Jens Axboe <axboe@suse.de>
+ * Copyright (C) 2001-2004 Peter Osterlund <petero2@telia.com>
+ *
+ * May be copied or modified under the terms of the GNU General Public
+ * License.  See linux/COPYING for more information.
+ *
+ * Packet writing layer for ATAPI and SCSI CD-R, CD-RW, DVD-R, and
+ * DVD-RW devices.
+ *
+ */
+#ifndef __PKTCDVD_H
+#define __PKTCDVD_H
+
+#include <linux/blkdev.h>
+#include <linux/completion.h>
+#include <linux/cdrom.h>
+#include <linux/kobject.h>
+#include <linux/sysfs.h>
+#include <linux/mempool.h>
+#include <uapi/linux/pktcdvd.h>
+
+/* default bio write queue congestion marks */
+#define PKT_WRITE_CONGESTION_ON    10000
+#define PKT_WRITE_CONGESTION_OFF   9000
+
+
+struct packet_settings
+{
+       __u32                   size;           /* packet size in (512 byte) sectors */
+       __u8                    fp;             /* fixed packets */
+       __u8                    link_loss;      /* the rest is specified
+                                                * as per Mt Fuji */
+       __u8                    write_type;
+       __u8                    track_mode;
+       __u8                    block_mode;
+};
+
+/*
+ * Very crude stats for now
+ */
+struct packet_stats
+{
+       unsigned long           pkt_started;
+       unsigned long           pkt_ended;
+       unsigned long           secs_w;
+       unsigned long           secs_rg;
+       unsigned long           secs_r;
+};
+
+struct packet_cdrw
+{
+       struct list_head        pkt_free_list;
+       struct list_head        pkt_active_list;
+       spinlock_t              active_list_lock; /* Serialize access to pkt_active_list */
+       struct task_struct      *thread;
+       atomic_t                pending_bios;
+};
+
+/*
+ * Switch to high speed reading after reading this many kilobytes
+ * with no interspersed writes.
+ */
+#define HI_SPEED_SWITCH 512
+
+struct packet_iosched
+{
+       atomic_t                attention;      /* Set to non-zero when queue processing is needed */
+       int                     writing;        /* Non-zero when writing, zero when reading */
+       spinlock_t              lock;           /* Protecting read/write queue manipulations */
+       struct bio_list         read_queue;
+       struct bio_list         write_queue;
+       sector_t                last_write;     /* The sector where the last write ended */
+       int                     successive_reads;
+};
+
+/*
+ * 32 buffers of 2048 bytes
+ */
+#if (PAGE_SIZE % CD_FRAMESIZE) != 0
+#error "PAGE_SIZE must be a multiple of CD_FRAMESIZE"
+#endif
+#define PACKET_MAX_SIZE                128
+#define FRAMES_PER_PAGE                (PAGE_SIZE / CD_FRAMESIZE)
+#define PACKET_MAX_SECTORS     (PACKET_MAX_SIZE * CD_FRAMESIZE >> 9)
+
+enum packet_data_state {
+       PACKET_IDLE_STATE,                      /* Not used at the moment */
+       PACKET_WAITING_STATE,                   /* Waiting for more bios to arrive, so */
+                                               /* we don't have to do as much */
+                                               /* data gathering */
+       PACKET_READ_WAIT_STATE,                 /* Waiting for reads to fill in holes */
+       PACKET_WRITE_WAIT_STATE,                /* Waiting for the write to complete */
+       PACKET_RECOVERY_STATE,                  /* Recover after read/write errors */
+       PACKET_FINISHED_STATE,                  /* After write has finished */
+
+       PACKET_NUM_STATES                       /* Number of possible states */
+};
+
+/*
+ * Information needed for writing a single packet
+ */
+struct pktcdvd_device;
+
+struct packet_data
+{
+       struct list_head        list;
+
+       spinlock_t              lock;           /* Lock protecting state transitions and */
+                                               /* orig_bios list */
+
+       struct bio_list         orig_bios;      /* Original bios passed to pkt_make_request */
+                                               /* that will be handled by this packet */
+       int                     write_size;     /* Total size of all bios in the orig_bios */
+                                               /* list, measured in number of frames */
+
+       struct bio              *w_bio;         /* The bio we will send to the real CD */
+                                               /* device once we have all data for the */
+                                               /* packet we are going to write */
+       sector_t                sector;         /* First sector in this packet */
+       int                     frames;         /* Number of frames in this packet */
+
+       enum packet_data_state  state;          /* Current state */
+       atomic_t                run_sm;         /* Incremented whenever the state */
+                                               /* machine needs to be run */
+       long                    sleep_time;     /* Set this to non-zero to make the state */
+                                               /* machine run after this many jiffies. */
+
+       atomic_t                io_wait;        /* Number of pending IO operations */
+       atomic_t                io_errors;      /* Number of read/write errors during IO */
+
+       struct bio              *r_bios[PACKET_MAX_SIZE]; /* bios to use during data gathering */
+       struct page             *pages[PACKET_MAX_SIZE / FRAMES_PER_PAGE];
+
+       int                     cache_valid;    /* If non-zero, the data for the zone defined */
+                                               /* by the sector variable is completely cached */
+                                               /* in the pages[] vector. */
+
+       int                     id;             /* ID number for debugging */
+       struct pktcdvd_device   *pd;
+};
+
+struct pkt_rb_node {
+       struct rb_node          rb_node;
+       struct bio              *bio;
+};
+
+struct packet_stacked_data
+{
+       struct bio              *bio;           /* Original read request bio */
+       struct pktcdvd_device   *pd;
+};
+#define PSD_POOL_SIZE          64
+
+struct pktcdvd_device
+{
+       struct block_device     *bdev;          /* dev attached */
+       dev_t                   pkt_dev;        /* our dev */
+       char                    name[20];
+       struct packet_settings  settings;
+       struct packet_stats     stats;
+       int                     refcnt;         /* Open count */
+       int                     write_speed;    /* current write speed, kB/s */
+       int                     read_speed;     /* current read speed, kB/s */
+       unsigned long           offset;         /* start offset */
+       __u8                    mode_offset;    /* 0 / 8 */
+       __u8                    type;
+       unsigned long           flags;
+       __u16                   mmc3_profile;
+       __u32                   nwa;            /* next writable address */
+       __u32                   lra;            /* last recorded address */
+       struct packet_cdrw      cdrw;
+       wait_queue_head_t       wqueue;
+
+       spinlock_t              lock;           /* Serialize access to bio_queue */
+       struct rb_root          bio_queue;      /* Work queue of bios we need to handle */
+       int                     bio_queue_size; /* Number of nodes in bio_queue */
+       bool                    congested;      /* Someone is waiting for bio_queue_size
+                                                * to drop. */
+       sector_t                current_sector; /* Keep track of where the elevator is */
+       atomic_t                scan_queue;     /* Set to non-zero when pkt_handle_queue */
+                                               /* needs to be run. */
+       mempool_t               rb_pool;        /* mempool for pkt_rb_node allocations */
+
+       struct packet_iosched   iosched;
+       struct gendisk          *disk;
+
+       int                     write_congestion_off;
+       int                     write_congestion_on;
+
+       struct device           *dev;           /* sysfs pktcdvd[0-7] dev */
+
+       struct dentry           *dfs_d_root;    /* debugfs: devname directory */
+       struct dentry           *dfs_f_info;    /* debugfs: info file */
+};
+
+#endif /* __PKTCDVD_H */
index 632320ec8f0826d6dfa8bafb6402aab65ae4b7b1..a48bb524097770892a2ff495e6304d8c9b4f00e8 100644 (file)
@@ -32,7 +32,8 @@ enum simatic_ipc_station_ids {
        SIMATIC_IPC_IPC477E = 0x00000A02,
        SIMATIC_IPC_IPC127E = 0x00000D01,
        SIMATIC_IPC_IPC227G = 0x00000F01,
-       SIMATIC_IPC_IPC427G = 0x00001001,
+       SIMATIC_IPC_IPCBX_39A = 0x00001001,
+       SIMATIC_IPC_IPCPX_39A = 0x00001002,
 };
 
 static inline u32 simatic_ipc_get_station_id(u8 *data, int max_len)
index f7f12728d4a637611e43c24df2cbce9867e960e9..9a60f45899d3c4e19a31cbf91272635a21d847ba 100644 (file)
@@ -5,7 +5,7 @@
 #ifndef __ASSEMBLER__
 #include <linux/types.h>
 
-#ifdef CONFIG_ARCH_OMAP1_ANY
+#ifdef CONFIG_ARCH_OMAP1
 /*
  * NOTE: Please use ioremap + __raw_read/write where possible instead of these
  */
@@ -15,7 +15,7 @@ extern u32 omap_readl(u32 pa);
 extern void omap_writeb(u8 v, u32 pa);
 extern void omap_writew(u16 v, u32 pa);
 extern void omap_writel(u32 v, u32 pa);
-#else
+#elif defined(CONFIG_COMPILE_TEST)
 static inline u8 omap_readb(u32 pa)  { return 0; }
 static inline u16 omap_readw(u32 pa) { return 0; }
 static inline u32 omap_readl(u32 pa) { return 0; }
index cd188a527d169025398caad1d21aa86db5f1b7c7..3b35b6f6533aab0553b87757ff5a73ccffa44610 100644 (file)
@@ -92,6 +92,11 @@ extern ssize_t rpc_pipe_generic_upcall(struct file *, struct rpc_pipe_msg *,
                                       char __user *, size_t);
 extern int rpc_queue_upcall(struct rpc_pipe *, struct rpc_pipe_msg *);
 
+/* returns true if the msg is in-flight, i.e., already eaten by the peer */
+static inline bool rpc_msg_is_inflight(const struct rpc_pipe_msg *msg) {
+       return (msg->copied != 0 && list_empty(&msg->list));
+}
+
 struct rpc_clnt;
 extern struct dentry *rpc_create_client_dir(struct dentry *, const char *, struct rpc_clnt *);
 extern int rpc_remove_client_dir(struct rpc_clnt *);
index 20c0ff54b7a0d313649d9df4ba6b9217615e2c77..7d68a5cc588163303a003265d5530d9542c20699 100644 (file)
@@ -198,8 +198,8 @@ static __always_inline int __calc_tpm2_event_size(struct tcg_pcr_event2_head *ev
         * The loop below will unmap these fields if the log is larger than
         * one page, so save them here for reference:
         */
-       count = READ_ONCE(event->count);
-       event_type = READ_ONCE(event->event_type);
+       count = event->count;
+       event_type = event->event_type;
 
        /* Verify that it's the log header */
        if (event_header->pcr_idx != 0 ||
index 7d5325d47c451dacf0108af5e607e4dac0b78745..86d1c8e79566ee040defebb85df3d85a96b9c3d5 100644 (file)
@@ -267,16 +267,15 @@ static inline void *usb_get_intfdata(struct usb_interface *intf)
 }
 
 /**
- * usb_set_intfdata() - associate driver-specific data with the interface
- * @intf: the usb interface
- * @data: pointer to the device priv structure or %NULL
+ * usb_set_intfdata() - associate driver-specific data with an interface
+ * @intf: USB interface
+ * @data: driver data
  *
- * Drivers should use this function in their probe() to associate their
- * driver-specific data with the usb interface.
+ * Drivers can use this function in their probe() callbacks to associate
+ * driver-specific data with an interface.
  *
- * When disconnecting, the core will take care of setting @intf back to %NULL,
- * so no actions are needed on the driver side. The interface should not be set
- * to %NULL before all actions completed (e.g. no outsanding URB remaining).
+ * Note that there is generally no need to clear the driver-data pointer even
+ * if some drivers do so for historical or implementation-specific reasons.
  */
 static inline void usb_set_intfdata(struct usb_interface *intf, void *data)
 {
@@ -774,11 +773,14 @@ extern struct device *usb_intf_get_dma_device(struct usb_interface *intf);
 extern int usb_acpi_set_power_state(struct usb_device *hdev, int index,
        bool enable);
 extern bool usb_acpi_power_manageable(struct usb_device *hdev, int index);
+extern int usb_acpi_port_lpm_incapable(struct usb_device *hdev, int index);
 #else
 static inline int usb_acpi_set_power_state(struct usb_device *hdev, int index,
        bool enable) { return 0; }
 static inline bool usb_acpi_power_manageable(struct usb_device *hdev, int index)
        { return true; }
+static inline int usb_acpi_port_lpm_incapable(struct usb_device *hdev, int index)
+       { return 0; }
 #endif
 
 /* USB autosuspend and autoresume */
index d5a5ae9263804a8de43373cd2497cb9fc9c0144c..ba717eac0229a26ecfc303840509108e61be5d3e 100644 (file)
@@ -15,6 +15,7 @@ struct key;
 struct sock;
 struct socket;
 struct rxrpc_call;
+enum rxrpc_abort_reason;
 
 enum rxrpc_interruptibility {
        RXRPC_INTERRUPTIBLE,    /* Call is interruptible */
@@ -55,7 +56,7 @@ int rxrpc_kernel_send_data(struct socket *, struct rxrpc_call *,
 int rxrpc_kernel_recv_data(struct socket *, struct rxrpc_call *,
                           struct iov_iter *, size_t *, bool, u32 *, u16 *);
 bool rxrpc_kernel_abort_call(struct socket *, struct rxrpc_call *,
-                            u32, int, const char *);
+                            u32, int, enum rxrpc_abort_reason);
 void rxrpc_kernel_end_call(struct socket *, struct rxrpc_call *);
 void rxrpc_kernel_get_peer(struct socket *, struct rxrpc_call *,
                           struct sockaddr_rxrpc *);
index 69174093078f02cbcf592c1ca1532f51d393f29a..99bd823e97f624e37cf3abb8ac0ec7eef4552b61 100644 (file)
@@ -108,6 +108,10 @@ struct inet_bind2_bucket {
        struct hlist_node       node;
        /* List of sockets hashed to this bucket */
        struct hlist_head       owners;
+       /* bhash has twsk in owners, but bhash2 has twsk in
+        * deathrow not to add a member in struct sock_common.
+        */
+       struct hlist_head       deathrow;
 };
 
 static inline struct net *ib_net(const struct inet_bind_bucket *ib)
index 5b47545f22d39eb2dd9725ac37bd7d7a9016a03c..4a8e578405cb37856f07dac6ad0f1809f786682a 100644 (file)
@@ -73,9 +73,14 @@ struct inet_timewait_sock {
        u32                     tw_priority;
        struct timer_list       tw_timer;
        struct inet_bind_bucket *tw_tb;
+       struct inet_bind2_bucket        *tw_tb2;
+       struct hlist_node               tw_bind2_node;
 };
 #define tw_tclass tw_tos
 
+#define twsk_for_each_bound_bhash2(__tw, list) \
+       hlist_for_each_entry(__tw, list, tw_bind2_node)
+
 static inline struct inet_timewait_sock *inet_twsk(const struct sock *sk)
 {
        return (struct inet_timewait_sock *)sk;
index 689da327ce2e8113a1478e4668237d5d1b40a9cc..e3235b9c02c21fc3ef5d25df55c07cf3f6d14c49 100644 (file)
@@ -1832,8 +1832,6 @@ struct ieee80211_vif_cfg {
  * @drv_priv: data area for driver use, will always be aligned to
  *     sizeof(void \*).
  * @txq: the multicast data TX queue
- * @txqs_stopped: per AC flag to indicate that intermediate TXQs are stopped,
- *     protected by fq->lock.
  * @offload_flags: 802.3 -> 802.11 enapsulation offload flags, see
  *     &enum ieee80211_offload_flags.
  * @mbssid_tx_vif: Pointer to the transmitting interface if MBSSID is enabled.
@@ -1863,8 +1861,6 @@ struct ieee80211_vif {
        bool probe_req_reg;
        bool rx_mcast_action_reg;
 
-       bool txqs_stopped[IEEE80211_NUM_ACS];
-
        struct ieee80211_vif *mbssid_tx_vif;
 
        /* must be last */
index e69ce23566eab880f1c75b7c59161ab9e20930ec..9430128aae991144f6e24d7b373a04153d6e3f0e 100644 (file)
@@ -312,17 +312,29 @@ struct nft_set_iter {
 /**
  *     struct nft_set_desc - description of set elements
  *
+ *     @ktype: key type
  *     @klen: key length
+ *     @dtype: data type
  *     @dlen: data length
+ *     @objtype: object type
+ *     @flags: flags
  *     @size: number of set elements
+ *     @policy: set policy
+ *     @gc_int: garbage collector interval
  *     @field_len: length of each field in concatenation, bytes
  *     @field_count: number of concatenated fields in element
  *     @expr: set must support for expressions
  */
 struct nft_set_desc {
+       u32                     ktype;
        unsigned int            klen;
+       u32                     dtype;
        unsigned int            dlen;
+       u32                     objtype;
        unsigned int            size;
+       u32                     policy;
+       u32                     gc_int;
+       u64                     timeout;
        u8                      field_len[NFT_REG32_COUNT];
        u8                      field_count;
        bool                    expr;
@@ -585,7 +597,9 @@ void *nft_set_catchall_gc(const struct nft_set *set);
 
 static inline unsigned long nft_set_gc_interval(const struct nft_set *set)
 {
-       return set->gc_int ? msecs_to_jiffies(set->gc_int) : HZ;
+       u32 gc_int = READ_ONCE(set->gc_int);
+
+       return gc_int ? msecs_to_jiffies(gc_int) : HZ;
 }
 
 /**
@@ -1558,6 +1572,9 @@ struct nft_trans_rule {
 struct nft_trans_set {
        struct nft_set                  *set;
        u32                             set_id;
+       u32                             gc_int;
+       u64                             timeout;
+       bool                            update;
        bool                            bound;
 };
 
@@ -1567,6 +1584,12 @@ struct nft_trans_set {
        (((struct nft_trans_set *)trans->data)->set_id)
 #define nft_trans_set_bound(trans)     \
        (((struct nft_trans_set *)trans->data)->bound)
+#define nft_trans_set_update(trans)    \
+       (((struct nft_trans_set *)trans->data)->update)
+#define nft_trans_set_timeout(trans)   \
+       (((struct nft_trans_set *)trans->data)->timeout)
+#define nft_trans_set_gc_int(trans)    \
+       (((struct nft_trans_set *)trans->data)->gc_int)
 
 struct nft_trans_chain {
        bool                            update;
index d5517719af4ef22282f0a15b132f8e8a07ae4179..af4aa66aaa4eba8f2eacdd00bc8fef31165c6a90 100644 (file)
@@ -1288,4 +1288,11 @@ void mq_change_real_num_tx(struct Qdisc *sch, unsigned int new_real_tx);
 
 int sch_frag_xmit_hook(struct sk_buff *skb, int (*xmit)(struct sk_buff *skb));
 
+/* Make sure qdisc is no longer in SCHED state. */
+static inline void qdisc_synchronize(const struct Qdisc *q)
+{
+       while (test_bit(__QDISC_STATE_SCHED, &q->state))
+               msleep(1);
+}
+
 #endif
index ceed2fc089ffa3f183d68959d2c9b4420cd8ccd7..d323fffb839aac4424ae1b552786663c548d9757 100644 (file)
@@ -216,6 +216,8 @@ skip:
        return tp->classify(skb, tp, res);
 }
 
+#endif /* CONFIG_NET_CLS */
+
 static inline void tc_wrapper_init(void)
 {
 #ifdef CONFIG_X86
@@ -224,8 +226,6 @@ static inline void tc_wrapper_init(void)
 #endif
 }
 
-#endif /* CONFIG_NET_CLS */
-
 #else
 
 #define TC_INDIRECT_SCOPE static
index cab52b0f11d0cad5a608795143b138e09c9bc955..34c03707fb6efe23d04943a443252cbfbf22686d 100644 (file)
@@ -236,6 +236,14 @@ enum {
        ISCSI_SESSION_FREE,
 };
 
+enum {
+       ISCSI_SESSION_TARGET_UNBOUND,
+       ISCSI_SESSION_TARGET_ALLOCATED,
+       ISCSI_SESSION_TARGET_SCANNED,
+       ISCSI_SESSION_TARGET_UNBINDING,
+       ISCSI_SESSION_TARGET_MAX,
+};
+
 #define ISCSI_MAX_TARGET -1
 
 struct iscsi_cls_session {
@@ -264,6 +272,7 @@ struct iscsi_cls_session {
         */
        pid_t creator;
        int state;
+       int target_state;                       /* session target bind state */
        int sid;                                /* session id */
        void *dd_data;                          /* LLD private data */
        struct device dev;      /* sysfs transport/container device */
index ab955591cb72c26598676772e3998f7e1e2b268e..73cac8d0287e89b5d469722c753d137ae549ea52 100644 (file)
@@ -170,7 +170,7 @@ struct rpi_firmware_clk_rate_request {
 
 #define RPI_FIRMWARE_CLK_RATE_REQUEST(_id)     \
        {                                       \
-               .id = _id,                      \
+               .id = cpu_to_le32(_id),         \
        }
 
 #if IS_ENABLED(CONFIG_RASPBERRYPI_FIRMWARE)
index 0bce0b4ff2faf32d9e7a511b127c9a8c87f067be..6548b5b5aa60803c571a2b5fee19ea53dcfc1ee2 100644 (file)
@@ -98,7 +98,7 @@ struct raid56_bio_trace_info;
        EM( FLUSH_DELALLOC_WAIT,        "FLUSH_DELALLOC_WAIT")          \
        EM( FLUSH_DELALLOC_FULL,        "FLUSH_DELALLOC_FULL")          \
        EM( FLUSH_DELAYED_REFS_NR,      "FLUSH_DELAYED_REFS_NR")        \
-       EM( FLUSH_DELAYED_REFS,         "FLUSH_ELAYED_REFS")            \
+       EM( FLUSH_DELAYED_REFS,         "FLUSH_DELAYED_REFS")           \
        EM( ALLOC_CHUNK,                "ALLOC_CHUNK")                  \
        EM( ALLOC_CHUNK_FORCE,          "ALLOC_CHUNK_FORCE")            \
        EM( RUN_DELAYED_IPUTS,          "RUN_DELAYED_IPUTS")            \
index c6cfed00d0c6c4e3efb3be88b21e727ea2dcc7d1..283db0ea3db4b37edda6c81f3c84213dc79d577c 100644 (file)
 /*
  * Declare tracing information enums and their string mappings for display.
  */
+#define rxrpc_abort_reasons \
+       /* AFS errors */                                                \
+       EM(afs_abort_general_error,             "afs-error")            \
+       EM(afs_abort_interrupted,               "afs-intr")             \
+       EM(afs_abort_oom,                       "afs-oom")              \
+       EM(afs_abort_op_not_supported,          "afs-op-notsupp")       \
+       EM(afs_abort_probeuuid_negative,        "afs-probeuuid-neg")    \
+       EM(afs_abort_send_data_error,           "afs-send-data")        \
+       EM(afs_abort_unmarshal_error,           "afs-unmarshal")        \
+       /* rxperf errors */                                             \
+       EM(rxperf_abort_general_error,          "rxperf-error")         \
+       EM(rxperf_abort_oom,                    "rxperf-oom")           \
+       EM(rxperf_abort_op_not_supported,       "rxperf-op-notsupp")    \
+       EM(rxperf_abort_unmarshal_error,        "rxperf-unmarshal")     \
+       /* RxKAD security errors */                                     \
+       EM(rxkad_abort_1_short_check,           "rxkad1-short-check")   \
+       EM(rxkad_abort_1_short_data,            "rxkad1-short-data")    \
+       EM(rxkad_abort_1_short_encdata,         "rxkad1-short-encdata") \
+       EM(rxkad_abort_1_short_header,          "rxkad1-short-hdr")     \
+       EM(rxkad_abort_2_short_check,           "rxkad2-short-check")   \
+       EM(rxkad_abort_2_short_data,            "rxkad2-short-data")    \
+       EM(rxkad_abort_2_short_header,          "rxkad2-short-hdr")     \
+       EM(rxkad_abort_2_short_len,             "rxkad2-short-len")     \
+       EM(rxkad_abort_bad_checksum,            "rxkad2-bad-cksum")     \
+       EM(rxkad_abort_chall_key_expired,       "rxkad-chall-key-exp")  \
+       EM(rxkad_abort_chall_level,             "rxkad-chall-level")    \
+       EM(rxkad_abort_chall_no_key,            "rxkad-chall-nokey")    \
+       EM(rxkad_abort_chall_short,             "rxkad-chall-short")    \
+       EM(rxkad_abort_chall_version,           "rxkad-chall-version")  \
+       EM(rxkad_abort_resp_bad_callid,         "rxkad-resp-bad-callid") \
+       EM(rxkad_abort_resp_bad_checksum,       "rxkad-resp-bad-cksum") \
+       EM(rxkad_abort_resp_bad_param,          "rxkad-resp-bad-param") \
+       EM(rxkad_abort_resp_call_ctr,           "rxkad-resp-call-ctr") \
+       EM(rxkad_abort_resp_call_state,         "rxkad-resp-call-state") \
+       EM(rxkad_abort_resp_key_expired,        "rxkad-resp-key-exp")   \
+       EM(rxkad_abort_resp_key_rejected,       "rxkad-resp-key-rej")   \
+       EM(rxkad_abort_resp_level,              "rxkad-resp-level")     \
+       EM(rxkad_abort_resp_nokey,              "rxkad-resp-nokey")     \
+       EM(rxkad_abort_resp_ooseq,              "rxkad-resp-ooseq")     \
+       EM(rxkad_abort_resp_short,              "rxkad-resp-short")     \
+       EM(rxkad_abort_resp_short_tkt,          "rxkad-resp-short-tkt") \
+       EM(rxkad_abort_resp_tkt_aname,          "rxkad-resp-tk-aname")  \
+       EM(rxkad_abort_resp_tkt_expired,        "rxkad-resp-tk-exp")    \
+       EM(rxkad_abort_resp_tkt_future,         "rxkad-resp-tk-future") \
+       EM(rxkad_abort_resp_tkt_inst,           "rxkad-resp-tk-inst")   \
+       EM(rxkad_abort_resp_tkt_len,            "rxkad-resp-tk-len")    \
+       EM(rxkad_abort_resp_tkt_realm,          "rxkad-resp-tk-realm")  \
+       EM(rxkad_abort_resp_tkt_short,          "rxkad-resp-tk-short")  \
+       EM(rxkad_abort_resp_tkt_sinst,          "rxkad-resp-tk-sinst")  \
+       EM(rxkad_abort_resp_tkt_sname,          "rxkad-resp-tk-sname")  \
+       EM(rxkad_abort_resp_unknown_tkt,        "rxkad-resp-unknown-tkt") \
+       EM(rxkad_abort_resp_version,            "rxkad-resp-version")   \
+       /* rxrpc errors */                                              \
+       EM(rxrpc_abort_call_improper_term,      "call-improper-term")   \
+       EM(rxrpc_abort_call_reset,              "call-reset")           \
+       EM(rxrpc_abort_call_sendmsg,            "call-sendmsg")         \
+       EM(rxrpc_abort_call_sock_release,       "call-sock-rel")        \
+       EM(rxrpc_abort_call_sock_release_tba,   "call-sock-rel-tba")    \
+       EM(rxrpc_abort_call_timeout,            "call-timeout")         \
+       EM(rxrpc_abort_no_service_key,          "no-serv-key")          \
+       EM(rxrpc_abort_nomem,                   "nomem")                \
+       EM(rxrpc_abort_service_not_offered,     "serv-not-offered")     \
+       EM(rxrpc_abort_shut_down,               "shut-down")            \
+       EM(rxrpc_abort_unsupported_security,    "unsup-sec")            \
+       EM(rxrpc_badmsg_bad_abort,              "bad-abort")            \
+       EM(rxrpc_badmsg_bad_jumbo,              "bad-jumbo")            \
+       EM(rxrpc_badmsg_short_ack,              "short-ack")            \
+       EM(rxrpc_badmsg_short_ack_info,         "short-ack-info")       \
+       EM(rxrpc_badmsg_short_hdr,              "short-hdr")            \
+       EM(rxrpc_badmsg_unsupported_packet,     "unsup-pkt")            \
+       EM(rxrpc_badmsg_zero_call,              "zero-call")            \
+       EM(rxrpc_badmsg_zero_seq,               "zero-seq")             \
+       EM(rxrpc_badmsg_zero_service,           "zero-service")         \
+       EM(rxrpc_eproto_ackr_outside_window,    "ackr-out-win")         \
+       EM(rxrpc_eproto_ackr_sack_overflow,     "ackr-sack-over")       \
+       EM(rxrpc_eproto_ackr_short_sack,        "ackr-short-sack")      \
+       EM(rxrpc_eproto_ackr_zero,              "ackr-zero")            \
+       EM(rxrpc_eproto_bad_upgrade,            "bad-upgrade")          \
+       EM(rxrpc_eproto_data_after_last,        "data-after-last")      \
+       EM(rxrpc_eproto_different_last,         "diff-last")            \
+       EM(rxrpc_eproto_early_reply,            "early-reply")          \
+       EM(rxrpc_eproto_improper_term,          "improper-term")        \
+       EM(rxrpc_eproto_no_client_call,         "no-cl-call")           \
+       EM(rxrpc_eproto_no_client_conn,         "no-cl-conn")           \
+       EM(rxrpc_eproto_no_service_call,        "no-sv-call")           \
+       EM(rxrpc_eproto_reupgrade,              "re-upgrade")           \
+       EM(rxrpc_eproto_rxnull_challenge,       "rxnull-chall")         \
+       EM(rxrpc_eproto_rxnull_response,        "rxnull-resp")          \
+       EM(rxrpc_eproto_tx_rot_last,            "tx-rot-last")          \
+       EM(rxrpc_eproto_unexpected_ack,         "unex-ack")             \
+       EM(rxrpc_eproto_unexpected_ackall,      "unex-ackall")          \
+       EM(rxrpc_eproto_unexpected_implicit_end, "unex-impl-end")       \
+       EM(rxrpc_eproto_unexpected_reply,       "unex-reply")           \
+       EM(rxrpc_eproto_wrong_security,         "wrong-sec")            \
+       EM(rxrpc_recvmsg_excess_data,           "recvmsg-excess")       \
+       EM(rxrpc_recvmsg_short_data,            "recvmsg-short")        \
+       E_(rxrpc_sendmsg_late_send,             "sendmsg-late")
+
 #define rxrpc_call_poke_traces \
+       EM(rxrpc_call_poke_abort,               "Abort")        \
+       EM(rxrpc_call_poke_complete,            "Compl")        \
        EM(rxrpc_call_poke_error,               "Error")        \
        EM(rxrpc_call_poke_idle,                "Idle")         \
        EM(rxrpc_call_poke_start,               "Start")        \
 #define rxrpc_skb_traces \
        EM(rxrpc_skb_eaten_by_unshare,          "ETN unshare  ") \
        EM(rxrpc_skb_eaten_by_unshare_nomem,    "ETN unshar-nm") \
+       EM(rxrpc_skb_get_conn_secured,          "GET conn-secd") \
        EM(rxrpc_skb_get_conn_work,             "GET conn-work") \
        EM(rxrpc_skb_get_local_work,            "GET locl-work") \
        EM(rxrpc_skb_get_reject_work,           "GET rej-work ") \
        EM(rxrpc_skb_new_error_report,          "NEW error-rpt") \
        EM(rxrpc_skb_new_jumbo_subpacket,       "NEW jumbo-sub") \
        EM(rxrpc_skb_new_unshared,              "NEW unshared ") \
+       EM(rxrpc_skb_put_conn_secured,          "PUT conn-secd") \
        EM(rxrpc_skb_put_conn_work,             "PUT conn-work") \
        EM(rxrpc_skb_put_error_report,          "PUT error-rep") \
        EM(rxrpc_skb_put_input,                 "PUT input    ") \
 #define rxrpc_peer_traces \
        EM(rxrpc_peer_free,                     "FREE        ") \
        EM(rxrpc_peer_get_accept,               "GET accept  ") \
-       EM(rxrpc_peer_get_activate_call,        "GET act-call") \
        EM(rxrpc_peer_get_bundle,               "GET bundle  ") \
        EM(rxrpc_peer_get_client_conn,          "GET cln-conn") \
        EM(rxrpc_peer_get_input,                "GET input   ") \
        EM(rxrpc_peer_put_bundle,               "PUT bundle  ") \
        EM(rxrpc_peer_put_call,                 "PUT call    ") \
        EM(rxrpc_peer_put_conn,                 "PUT conn    ") \
-       EM(rxrpc_peer_put_discard_tmp,          "PUT disc-tmp") \
        EM(rxrpc_peer_put_input,                "PUT input   ") \
        EM(rxrpc_peer_put_input_error,          "PUT inpt-err") \
        E_(rxrpc_peer_put_keepalive,            "PUT keepaliv")
        EM(rxrpc_bundle_get_client_call,        "GET clt-call") \
        EM(rxrpc_bundle_get_client_conn,        "GET clt-conn") \
        EM(rxrpc_bundle_get_service_conn,       "GET svc-conn") \
+       EM(rxrpc_bundle_put_call,               "PUT call    ") \
        EM(rxrpc_bundle_put_conn,               "PUT conn    ") \
        EM(rxrpc_bundle_put_discard,            "PUT discard ") \
        E_(rxrpc_bundle_new,                    "NEW         ")
        EM(rxrpc_conn_get_call_input,           "GET inp-call") \
        EM(rxrpc_conn_get_conn_input,           "GET inp-conn") \
        EM(rxrpc_conn_get_idle,                 "GET idle    ") \
-       EM(rxrpc_conn_get_poke,                 "GET poke    ") \
+       EM(rxrpc_conn_get_poke_abort,           "GET pk-abort") \
+       EM(rxrpc_conn_get_poke_timer,           "GET poke    ") \
        EM(rxrpc_conn_get_service_conn,         "GET svc-conn") \
        EM(rxrpc_conn_new_client,               "NEW client  ") \
        EM(rxrpc_conn_new_service,              "NEW service ") \
        EM(rxrpc_conn_put_call,                 "PUT call    ") \
        EM(rxrpc_conn_put_call_input,           "PUT inp-call") \
        EM(rxrpc_conn_put_conn_input,           "PUT inp-conn") \
-       EM(rxrpc_conn_put_discard,              "PUT discard ") \
        EM(rxrpc_conn_put_discard_idle,         "PUT disc-idl") \
        EM(rxrpc_conn_put_local_dead,           "PUT loc-dead") \
        EM(rxrpc_conn_put_noreuse,              "PUT noreuse ") \
        EM(rxrpc_conn_put_service_reaped,       "PUT svc-reap") \
        EM(rxrpc_conn_put_unbundle,             "PUT unbundle") \
        EM(rxrpc_conn_put_unidle,               "PUT unidle  ") \
+       EM(rxrpc_conn_put_work,                 "PUT work    ") \
        EM(rxrpc_conn_queue_challenge,          "QUE chall   ") \
        EM(rxrpc_conn_queue_retry_work,         "QUE retry-wk") \
        EM(rxrpc_conn_queue_rx_work,            "QUE rx-work ") \
-       EM(rxrpc_conn_queue_timer,              "QUE timer   ") \
        EM(rxrpc_conn_see_new_service_conn,     "SEE new-svc ") \
        EM(rxrpc_conn_see_reap_service,         "SEE reap-svc") \
        E_(rxrpc_conn_see_work,                 "SEE work    ")
        EM(rxrpc_client_chan_activate,          "ChActv") \
        EM(rxrpc_client_chan_disconnect,        "ChDisc") \
        EM(rxrpc_client_chan_pass,              "ChPass") \
-       EM(rxrpc_client_chan_wait_failed,       "ChWtFl") \
        EM(rxrpc_client_cleanup,                "Clean ") \
        EM(rxrpc_client_discard,                "Discar") \
-       EM(rxrpc_client_duplicate,              "Duplic") \
        EM(rxrpc_client_exposed,                "Expose") \
        EM(rxrpc_client_replace,                "Replac") \
+       EM(rxrpc_client_queue_new_call,         "Q-Call") \
        EM(rxrpc_client_to_active,              "->Actv") \
        E_(rxrpc_client_to_idle,                "->Idle")
 
 #define rxrpc_call_traces \
+       EM(rxrpc_call_get_io_thread,            "GET iothread") \
        EM(rxrpc_call_get_input,                "GET input   ") \
        EM(rxrpc_call_get_kernel_service,       "GET krnl-srv") \
        EM(rxrpc_call_get_notify_socket,        "GET notify  ") \
        EM(rxrpc_call_new_prealloc_service,     "NEW prealloc") \
        EM(rxrpc_call_put_discard_prealloc,     "PUT disc-pre") \
        EM(rxrpc_call_put_discard_error,        "PUT disc-err") \
+       EM(rxrpc_call_put_io_thread,            "PUT iothread") \
        EM(rxrpc_call_put_input,                "PUT input   ") \
        EM(rxrpc_call_put_kernel,               "PUT kernel  ") \
        EM(rxrpc_call_put_poke,                 "PUT poke    ") \
        EM(rxrpc_call_put_sendmsg,              "PUT sendmsg ") \
        EM(rxrpc_call_put_unnotify,             "PUT unnotify") \
        EM(rxrpc_call_put_userid_exists,        "PUT u-exists") \
+       EM(rxrpc_call_put_userid,               "PUT user-id ") \
        EM(rxrpc_call_see_accept,               "SEE accept  ") \
        EM(rxrpc_call_see_activate_client,      "SEE act-clnt") \
        EM(rxrpc_call_see_connect_failed,       "SEE con-fail") \
        EM(rxrpc_call_see_connected,            "SEE connect ") \
+       EM(rxrpc_call_see_disconnected,         "SEE disconn ") \
        EM(rxrpc_call_see_distribute_error,     "SEE dist-err") \
        EM(rxrpc_call_see_input,                "SEE input   ") \
        EM(rxrpc_call_see_release,              "SEE release ") \
 #define EM(a, b) a,
 #define E_(a, b) a
 
+enum rxrpc_abort_reason                { rxrpc_abort_reasons } __mode(byte);
 enum rxrpc_bundle_trace                { rxrpc_bundle_traces } __mode(byte);
 enum rxrpc_call_poke_trace     { rxrpc_call_poke_traces } __mode(byte);
 enum rxrpc_call_trace          { rxrpc_call_traces } __mode(byte);
@@ -404,9 +509,13 @@ enum rxrpc_txqueue_trace   { rxrpc_txqueue_traces } __mode(byte);
  */
 #undef EM
 #undef E_
+
+#ifndef RXRPC_TRACE_ONLY_DEFINE_ENUMS
+
 #define EM(a, b) TRACE_DEFINE_ENUM(a);
 #define E_(a, b) TRACE_DEFINE_ENUM(a);
 
+rxrpc_abort_reasons;
 rxrpc_bundle_traces;
 rxrpc_call_poke_traces;
 rxrpc_call_traces;
@@ -657,14 +766,14 @@ TRACE_EVENT(rxrpc_rx_done,
            );
 
 TRACE_EVENT(rxrpc_abort,
-           TP_PROTO(unsigned int call_nr, const char *why, u32 cid, u32 call_id,
-                    rxrpc_seq_t seq, int abort_code, int error),
+           TP_PROTO(unsigned int call_nr, enum rxrpc_abort_reason why,
+                    u32 cid, u32 call_id, rxrpc_seq_t seq, int abort_code, int error),
 
            TP_ARGS(call_nr, why, cid, call_id, seq, abort_code, error),
 
            TP_STRUCT__entry(
                    __field(unsigned int,               call_nr         )
-                   __array(char,                       why, 4          )
+                   __field(enum rxrpc_abort_reason,    why             )
                    __field(u32,                        cid             )
                    __field(u32,                        call_id         )
                    __field(rxrpc_seq_t,                seq             )
@@ -673,8 +782,8 @@ TRACE_EVENT(rxrpc_abort,
                             ),
 
            TP_fast_assign(
-                   memcpy(__entry->why, why, 4);
                    __entry->call_nr = call_nr;
+                   __entry->why = why;
                    __entry->cid = cid;
                    __entry->call_id = call_id;
                    __entry->abort_code = abort_code;
@@ -685,7 +794,8 @@ TRACE_EVENT(rxrpc_abort,
            TP_printk("c=%08x %08x:%08x s=%u a=%d e=%d %s",
                      __entry->call_nr,
                      __entry->cid, __entry->call_id, __entry->seq,
-                     __entry->abort_code, __entry->error, __entry->why)
+                     __entry->abort_code, __entry->error,
+                     __print_symbolic(__entry->why, rxrpc_abort_reasons))
            );
 
 TRACE_EVENT(rxrpc_call_complete,
@@ -1062,10 +1172,10 @@ TRACE_EVENT(rxrpc_receive,
            );
 
 TRACE_EVENT(rxrpc_recvmsg,
-           TP_PROTO(struct rxrpc_call *call, enum rxrpc_recvmsg_trace why,
+           TP_PROTO(unsigned int call_debug_id, enum rxrpc_recvmsg_trace why,
                     int ret),
 
-           TP_ARGS(call, why, ret),
+           TP_ARGS(call_debug_id, why, ret),
 
            TP_STRUCT__entry(
                    __field(unsigned int,               call            )
@@ -1074,7 +1184,7 @@ TRACE_EVENT(rxrpc_recvmsg,
                             ),
 
            TP_fast_assign(
-                   __entry->call = call ? call->debug_id : 0;
+                   __entry->call = call_debug_id;
                    __entry->why = why;
                    __entry->ret = ret;
                           ),
@@ -1521,30 +1631,6 @@ TRACE_EVENT(rxrpc_improper_term,
                      __entry->abort_code)
            );
 
-TRACE_EVENT(rxrpc_rx_eproto,
-           TP_PROTO(struct rxrpc_call *call, rxrpc_serial_t serial,
-                    const char *why),
-
-           TP_ARGS(call, serial, why),
-
-           TP_STRUCT__entry(
-                   __field(unsigned int,               call            )
-                   __field(rxrpc_serial_t,             serial          )
-                   __field(const char *,               why             )
-                            ),
-
-           TP_fast_assign(
-                   __entry->call = call ? call->debug_id : 0;
-                   __entry->serial = serial;
-                   __entry->why = why;
-                          ),
-
-           TP_printk("c=%08x EPROTO %08x %s",
-                     __entry->call,
-                     __entry->serial,
-                     __entry->why)
-           );
-
 TRACE_EVENT(rxrpc_connect_call,
            TP_PROTO(struct rxrpc_call *call),
 
@@ -1842,6 +1928,8 @@ TRACE_EVENT(rxrpc_call_poked,
 
 #undef EM
 #undef E_
+
+#endif /* RXRPC_TRACE_ONLY_DEFINE_ENUMS */
 #endif /* _TRACE_RXRPC_H */
 
 /* This part must be outside protection */
index a9e2250cd7205a58350a9f08a1b4709f9746cdde..d47c47d06f1109a9a18d58d38938254aa6827f7b 100644 (file)
@@ -38,7 +38,7 @@
  */
 #define BR2684_ENCAPS_VC       (0)     /* VC-mux */
 #define BR2684_ENCAPS_LLC      (1)
-#define BR2684_ENCAPS_AUTODETECT (2)   /* Unsuported */
+#define BR2684_ENCAPS_AUTODETECT (2)   /* Unsupported */
 
 /*
  * Is this VC bridged or routed?
index 9d4c4078e8d00e23957314892bf5963b938220db..2780bce62fafe63354a58df610d7c912efc870c3 100644 (file)
 
 #include <linux/fs.h>
 #include <linux/types.h>
+/*
+ * this file is shared with liburing and that has to autodetect
+ * if linux/time_types.h is available or not, it can
+ * define UAPI_LINUX_IO_URING_H_SKIP_LINUX_TIME_TYPES_H
+ * if linux/time_types.h is not available
+ */
+#ifndef UAPI_LINUX_IO_URING_H_SKIP_LINUX_TIME_TYPES_H
 #include <linux/time_types.h>
+#endif
 
 #ifdef __cplusplus
 extern "C" {
index 20522d4ba1e0d8b058876d93fd9dd554910f6263..55155e262646e5fc48b6317054b07b1c15aad02a 100644 (file)
@@ -1767,6 +1767,7 @@ struct kvm_xen_hvm_attr {
                __u8 runstate_update_flag;
                struct {
                        __u64 gfn;
+#define KVM_XEN_INVALID_GFN ((__u64)-1)
                } shared_info;
                struct {
                        __u32 send_port;
@@ -1798,6 +1799,7 @@ struct kvm_xen_hvm_attr {
        } u;
 };
 
+
 /* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_SHARED_INFO */
 #define KVM_XEN_ATTR_TYPE_LONG_MODE            0x0
 #define KVM_XEN_ATTR_TYPE_SHARED_INFO          0x1
@@ -1823,6 +1825,7 @@ struct kvm_xen_vcpu_attr {
        __u16 pad[3];
        union {
                __u64 gpa;
+#define KVM_XEN_INVALID_GPA ((__u64)-1)
                __u64 pad[8];
                struct {
                        __u64 state;
diff --git a/include/uapi/linux/pktcdvd.h b/include/uapi/linux/pktcdvd.h
new file mode 100644 (file)
index 0000000..9cbb55d
--- /dev/null
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright (C) 2000 Jens Axboe <axboe@suse.de>
+ * Copyright (C) 2001-2004 Peter Osterlund <petero2@telia.com>
+ *
+ * May be copied or modified under the terms of the GNU General Public
+ * License.  See linux/COPYING for more information.
+ *
+ * Packet writing layer for ATAPI and SCSI CD-R, CD-RW, DVD-R, and
+ * DVD-RW devices.
+ *
+ */
+#ifndef _UAPI__PKTCDVD_H
+#define _UAPI__PKTCDVD_H
+
+#include <linux/types.h>
+
+/*
+ * 1 for normal debug messages, 2 is very verbose. 0 to turn it off.
+ */
+#define PACKET_DEBUG           1
+
+#define        MAX_WRITERS             8
+
+#define PKT_RB_POOL_SIZE       512
+
+/*
+ * How long we should hold a non-full packet before starting data gathering.
+ */
+#define PACKET_WAIT_TIME       (HZ * 5 / 1000)
+
+/*
+ * use drive write caching -- we need deferred error handling to be
+ * able to successfully recover with this option (drive will return good
+ * status as soon as the cdb is validated).
+ */
+#if defined(CONFIG_CDROM_PKTCDVD_WCACHE)
+#define USE_WCACHING           1
+#else
+#define USE_WCACHING           0
+#endif
+
+/*
+ * No user-servicable parts beyond this point ->
+ */
+
+/*
+ * device types
+ */
+#define PACKET_CDR             1
+#define        PACKET_CDRW             2
+#define PACKET_DVDR            3
+#define PACKET_DVDRW           4
+
+/*
+ * flags
+ */
+#define PACKET_WRITABLE                1       /* pd is writable */
+#define PACKET_NWA_VALID       2       /* next writable address valid */
+#define PACKET_LRA_VALID       3       /* last recorded address valid */
+#define PACKET_MERGE_SEGS      4       /* perform segment merging to keep */
+                                       /* underlying cdrom device happy */
+
+/*
+ * Disc status -- from READ_DISC_INFO
+ */
+#define PACKET_DISC_EMPTY      0
+#define PACKET_DISC_INCOMPLETE 1
+#define PACKET_DISC_COMPLETE   2
+#define PACKET_DISC_OTHER      3
+
+/*
+ * write type, and corresponding data block type
+ */
+#define PACKET_MODE1           1
+#define PACKET_MODE2           2
+#define PACKET_BLOCK_MODE1     8
+#define PACKET_BLOCK_MODE2     10
+
+/*
+ * Last session/border status
+ */
+#define PACKET_SESSION_EMPTY           0
+#define PACKET_SESSION_INCOMPLETE      1
+#define PACKET_SESSION_RESERVED                2
+#define PACKET_SESSION_COMPLETE                3
+
+#define PACKET_MCN                     "4a656e734178626f65323030300000"
+
+#undef PACKET_USE_LS
+
+#define PKT_CTRL_CMD_SETUP     0
+#define PKT_CTRL_CMD_TEARDOWN  1
+#define PKT_CTRL_CMD_STATUS    2
+
+struct pkt_ctrl_command {
+       __u32 command;                          /* in: Setup, teardown, status */
+       __u32 dev_index;                        /* in/out: Device index */
+       __u32 dev;                              /* in/out: Device nr for cdrw device */
+       __u32 pkt_dev;                          /* in/out: Device nr for packet device */
+       __u32 num_devices;                      /* out: Largest device index + 1 */
+       __u32 padding;                          /* Not used */
+};
+
+/*
+ * packet ioctls
+ */
+#define PACKET_IOCTL_MAGIC     ('X')
+#define PACKET_CTRL_CMD                _IOWR(PACKET_IOCTL_MAGIC, 1, struct pkt_ctrl_command)
+
+
+#endif /* _UAPI__PKTCDVD_H */
index 3511095c2702bef5f4a0f160298e5d255579a3e5..42a40ad3fb622ce330ae4acb9a99bfef4dd3c0e5 100644 (file)
@@ -58,7 +58,7 @@
 
 #define PSCI_1_1_FN_SYSTEM_RESET2              PSCI_0_2_FN(18)
 #define PSCI_1_1_FN_MEM_PROTECT                        PSCI_0_2_FN(19)
-#define PSCI_1_1_FN_MEM_PROTECT_CHECK_RANGE    PSCI_0_2_FN(19)
+#define PSCI_1_1_FN_MEM_PROTECT_CHECK_RANGE    PSCI_0_2_FN(20)
 
 #define PSCI_1_0_FN64_CPU_DEFAULT_SUSPEND      PSCI_0_2_FN64(12)
 #define PSCI_1_0_FN64_NODE_HW_STATE            PSCI_0_2_FN64(13)
@@ -67,7 +67,7 @@
 #define PSCI_1_0_FN64_STAT_COUNT               PSCI_0_2_FN64(17)
 
 #define PSCI_1_1_FN64_SYSTEM_RESET2            PSCI_0_2_FN64(18)
-#define PSCI_1_1_FN64_MEM_PROTECT_CHECK_RANGE  PSCI_0_2_FN64(19)
+#define PSCI_1_1_FN64_MEM_PROTECT_CHECK_RANGE  PSCI_0_2_FN64(20)
 
 /* PSCI v0.2 power state encoding for CPU_SUSPEND function */
 #define PSCI_0_2_POWER_STATE_ID_MASK           0xffff
index 9bd79235c875f25ea6825dccd89858b5973796f2..54b649ab0f22b6c6d4aad09cca3141db505be496 100644 (file)
@@ -53,11 +53,9 @@ enum vdpa_attr {
        VDPA_ATTR_DEV_VENDOR_ATTR_NAME,         /* string */
        VDPA_ATTR_DEV_VENDOR_ATTR_VALUE,        /* u64 */
 
+       /* virtio features that are provisioned to the vDPA device */
        VDPA_ATTR_DEV_FEATURES,                 /* u64 */
 
-       /* virtio features that are supported by the vDPA device */
-       VDPA_ATTR_VDPA_DEV_SUPPORTED_FEATURES,  /* u64 */
-
        /* new attributes must be added above here */
        VDPA_ATTR_MAX,
 };
index eaa932b99d8ac2d32a0b0bb73a1a6ce9eff12b95..ad4fb4eab753d41925b82c8de01ee6b1d81cd2fa 100644 (file)
@@ -117,7 +117,7 @@ struct xenbus_driver {
                     const struct xenbus_device_id *id);
        void (*otherend_changed)(struct xenbus_device *dev,
                                 enum xenbus_state backend_state);
-       int (*remove)(struct xenbus_device *dev);
+       void (*remove)(struct xenbus_device *dev);
        int (*suspend)(struct xenbus_device *dev);
        int (*resume)(struct xenbus_device *dev);
        int (*uevent)(struct xenbus_device *, struct kobj_uevent_env *);
index 7e5c3ddc341de319115e47399beb53e9b9960363..44e90b28a30f12e82e7006d46a4daa588c29f15f 100644 (file)
@@ -204,7 +204,7 @@ config LOCALVERSION_AUTO
          appended after any matching localversion* files, and after the value
          set in CONFIG_LOCALVERSION.
 
-         (The actual string used here is the first eight characters produced
+         (The actual string used here is the first 12 characters produced
          by running the command:
 
            $ git rev-parse --verify HEAD
@@ -776,7 +776,7 @@ config PRINTK_SAFE_LOG_BUF_SHIFT
        depends on PRINTK
        help
          Select the size of an alternate printk per-CPU buffer where messages
-         printed from usafe contexts are temporary stored. One example would
+         printed from unsafe contexts are temporary stored. One example would
          be NMI messages, another one - printk recursion. The messages are
          copied to the main log buffer in a safe context to avoid a deadlock.
          The value defines the size as a power of 2.
@@ -894,13 +894,17 @@ config CC_IMPLICIT_FALLTHROUGH
        default "-Wimplicit-fallthrough=5" if CC_IS_GCC && $(cc-option,-Wimplicit-fallthrough=5)
        default "-Wimplicit-fallthrough" if CC_IS_CLANG && $(cc-option,-Wunreachable-code-fallthrough)
 
-# Currently, disable gcc-12 array-bounds globally.
+# Currently, disable gcc-11,12 array-bounds globally.
 # We may want to target only particular configurations some day.
+config GCC11_NO_ARRAY_BOUNDS
+       def_bool y
+
 config GCC12_NO_ARRAY_BOUNDS
        def_bool y
 
 config CC_NO_ARRAY_BOUNDS
        bool
+       default y if CC_IS_GCC && GCC_VERSION >= 110000 && GCC_VERSION < 120000 && GCC11_NO_ARRAY_BOUNDS
        default y if CC_IS_GCC && GCC_VERSION >= 120000 && GCC_VERSION < 130000 && GCC12_NO_ARRAY_BOUNDS
 
 #
index 8316c23bead26814a724e311c41d1b9c5431c86c..26de459006c4eef17558ddd607b407b4740cfcc9 100644 (file)
@@ -59,3 +59,4 @@ include/generated/utsversion.h: FORCE
 
 $(obj)/version-timestamp.o: include/generated/utsversion.h
 CFLAGS_version-timestamp.o := -include include/generated/utsversion.h
+KASAN_SANITIZE_version-timestamp.o := n
index 179e93bae5390e80cd903c691a303872906db4af..043cbf80a766deec23a29e0cac70c22f9a149c6e 100644 (file)
@@ -2,7 +2,6 @@
 
 #include <generated/compile.h>
 #include <generated/utsrelease.h>
-#include <linux/version.h>
 #include <linux/proc_ns.h>
 #include <linux/refcount.h>
 #include <linux/uts.h>
index 2291a53cdabd1191242f53ac220833174fff2def..b4f5dfacc0c31d905cb90374ae23d044b28dd659 100644 (file)
@@ -288,24 +288,23 @@ int io_sync_cancel(struct io_ring_ctx *ctx, void __user *arg)
 
                ret = __io_sync_cancel(current->io_uring, &cd, sc.fd);
 
+               mutex_unlock(&ctx->uring_lock);
                if (ret != -EALREADY)
                        break;
 
-               mutex_unlock(&ctx->uring_lock);
                ret = io_run_task_work_sig(ctx);
-               if (ret < 0) {
-                       mutex_lock(&ctx->uring_lock);
+               if (ret < 0)
                        break;
-               }
                ret = schedule_hrtimeout(&timeout, HRTIMER_MODE_ABS);
-               mutex_lock(&ctx->uring_lock);
                if (!ret) {
                        ret = -ETIME;
                        break;
                }
+               mutex_lock(&ctx->uring_lock);
        } while (1);
 
        finish_wait(&ctx->cq_wait, &wait);
+       mutex_lock(&ctx->uring_lock);
 
        if (ret == -ENOENT || ret > 0)
                ret = 0;
index 2e04850a657b075e6acf0e295beba6d2e230a7ff..882bd56b01ed0f5bd6e61668b1b33b711687a0c5 100644 (file)
@@ -170,12 +170,11 @@ static __cold void __io_uring_show_fdinfo(struct io_ring_ctx *ctx,
                xa_for_each(&ctx->personalities, index, cred)
                        io_uring_show_cred(m, index, cred);
        }
-       if (has_lock)
-               mutex_unlock(&ctx->uring_lock);
 
        seq_puts(m, "PollList:\n");
        for (i = 0; i < (1U << ctx->cancel_table.hash_bits); i++) {
                struct io_hash_bucket *hb = &ctx->cancel_table.hbs[i];
+               struct io_hash_bucket *hbl = &ctx->cancel_table_locked.hbs[i];
                struct io_kiocb *req;
 
                spin_lock(&hb->lock);
@@ -183,8 +182,17 @@ static __cold void __io_uring_show_fdinfo(struct io_ring_ctx *ctx,
                        seq_printf(m, "  op=%d, task_works=%d\n", req->opcode,
                                        task_work_pending(req->task));
                spin_unlock(&hb->lock);
+
+               if (!has_lock)
+                       continue;
+               hlist_for_each_entry(req, &hbl->list, hash_node)
+                       seq_printf(m, "  op=%d, task_works=%d\n", req->opcode,
+                                       task_work_pending(req->task));
        }
 
+       if (has_lock)
+               mutex_unlock(&ctx->uring_lock);
+
        seq_puts(m, "CqOverflowList:\n");
        spin_lock(&ctx->completion_lock);
        list_for_each_entry(ocqe, &ctx->cq_overflow_list, list) {
index 6f1d0e5df23ad815479904639282bf7a8b4b3b33..411bb2d1acd452f6bb1af070dd338fd57fb6d3f7 100644 (file)
@@ -1230,6 +1230,12 @@ static void io_wq_cancel_tw_create(struct io_wq *wq)
 
                worker = container_of(cb, struct io_worker, create_work);
                io_worker_cancel_cb(worker);
+               /*
+                * Only the worker continuation helper has worker allocated and
+                * hence needs freeing.
+                */
+               if (cb->func == create_worker_cont)
+                       kfree(worker);
        }
 }
 
index ff2bbac1a10f4afbaad0bb14820a0df0ee70893e..0a4efada9b3c3dbc40677208174f9eb892470969 100644 (file)
@@ -677,16 +677,20 @@ static void __io_cqring_overflow_flush(struct io_ring_ctx *ctx)
        io_cq_unlock_post(ctx);
 }
 
+static void io_cqring_do_overflow_flush(struct io_ring_ctx *ctx)
+{
+       /* iopoll syncs against uring_lock, not completion_lock */
+       if (ctx->flags & IORING_SETUP_IOPOLL)
+               mutex_lock(&ctx->uring_lock);
+       __io_cqring_overflow_flush(ctx);
+       if (ctx->flags & IORING_SETUP_IOPOLL)
+               mutex_unlock(&ctx->uring_lock);
+}
+
 static void io_cqring_overflow_flush(struct io_ring_ctx *ctx)
 {
-       if (test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq)) {
-               /* iopoll syncs against uring_lock, not completion_lock */
-               if (ctx->flags & IORING_SETUP_IOPOLL)
-                       mutex_lock(&ctx->uring_lock);
-               __io_cqring_overflow_flush(ctx);
-               if (ctx->flags & IORING_SETUP_IOPOLL)
-                       mutex_unlock(&ctx->uring_lock);
-       }
+       if (test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq))
+               io_cqring_do_overflow_flush(ctx);
 }
 
 void __io_put_task(struct task_struct *task, int nr)
@@ -727,6 +731,8 @@ static bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data,
        size_t ocq_size = sizeof(struct io_overflow_cqe);
        bool is_cqe32 = (ctx->flags & IORING_SETUP_CQE32);
 
+       lockdep_assert_held(&ctx->completion_lock);
+
        if (is_cqe32)
                ocq_size += sizeof(struct io_uring_cqe);
 
@@ -816,9 +822,6 @@ static bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data, s32 res,
 {
        struct io_uring_cqe *cqe;
 
-       if (!ctx->task_complete)
-               lockdep_assert_held(&ctx->completion_lock);
-
        ctx->cq_extra++;
 
        /*
@@ -1232,13 +1235,18 @@ static void io_req_local_work_add(struct io_kiocb *req)
 {
        struct io_ring_ctx *ctx = req->ctx;
 
-       if (!llist_add(&req->io_task_work.node, &ctx->work_llist))
+       percpu_ref_get(&ctx->refs);
+
+       if (!llist_add(&req->io_task_work.node, &ctx->work_llist)) {
+               percpu_ref_put(&ctx->refs);
                return;
+       }
        /* need it for the following io_cqring_wake() */
        smp_mb__after_atomic();
 
        if (unlikely(atomic_read(&req->task->io_uring->in_idle))) {
                io_move_task_work_from_local(ctx);
+               percpu_ref_put(&ctx->refs);
                return;
        }
 
@@ -1248,6 +1256,7 @@ static void io_req_local_work_add(struct io_kiocb *req)
        if (ctx->has_evfd)
                io_eventfd_signal(ctx);
        __io_cqring_wake(ctx);
+       percpu_ref_put(&ctx->refs);
 }
 
 void __io_req_task_work_add(struct io_kiocb *req, bool allow_local)
@@ -2461,7 +2470,7 @@ int io_run_task_work_sig(struct io_ring_ctx *ctx)
 /* when returns >0, the caller should retry */
 static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
                                          struct io_wait_queue *iowq,
-                                         ktime_t timeout)
+                                         ktime_t *timeout)
 {
        int ret;
        unsigned long check_cq;
@@ -2479,7 +2488,7 @@ static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
                if (check_cq & BIT(IO_CHECK_CQ_DROPPED_BIT))
                        return -EBADR;
        }
-       if (!schedule_hrtimeout(&timeout, HRTIMER_MODE_ABS))
+       if (!schedule_hrtimeout(timeout, HRTIMER_MODE_ABS))
                return -ETIME;
 
        /*
@@ -2549,10 +2558,13 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
 
        trace_io_uring_cqring_wait(ctx, min_events);
        do {
-               io_cqring_overflow_flush(ctx);
+               if (test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq)) {
+                       finish_wait(&ctx->cq_wait, &iowq.wq);
+                       io_cqring_do_overflow_flush(ctx);
+               }
                prepare_to_wait_exclusive(&ctx->cq_wait, &iowq.wq,
                                                TASK_INTERRUPTIBLE);
-               ret = io_cqring_wait_schedule(ctx, &iowq, timeout);
+               ret = io_cqring_wait_schedule(ctx, &iowq, &timeout);
                if (__io_cqring_events_user(ctx) >= min_events)
                        break;
                cond_resched();
@@ -3662,7 +3674,7 @@ static __cold int io_uring_create(unsigned entries, struct io_uring_params *p,
 
        if (ctx->flags & IORING_SETUP_SINGLE_ISSUER
            && !(ctx->flags & IORING_SETUP_R_DISABLED))
-               ctx->submitter_task = get_task_struct(current);
+               WRITE_ONCE(ctx->submitter_task, get_task_struct(current));
 
        file = io_uring_get_file(ctx);
        if (IS_ERR(file)) {
@@ -3856,7 +3868,7 @@ static int io_register_enable_rings(struct io_ring_ctx *ctx)
                return -EBADFD;
 
        if (ctx->flags & IORING_SETUP_SINGLE_ISSUER && !ctx->submitter_task)
-               ctx->submitter_task = get_task_struct(current);
+               WRITE_ONCE(ctx->submitter_task, get_task_struct(current));
 
        if (ctx->restrictions.registered)
                ctx->restricted = 1;
@@ -4013,8 +4025,6 @@ static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
                return -EEXIST;
 
        if (ctx->restricted) {
-               if (opcode >= IORING_REGISTER_LAST)
-                       return -EINVAL;
                opcode = array_index_nospec(opcode, IORING_REGISTER_LAST);
                if (!test_bit(opcode, ctx->restrictions.register_op))
                        return -EACCES;
@@ -4170,6 +4180,9 @@ SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode,
        long ret = -EBADF;
        struct fd f;
 
+       if (opcode >= IORING_REGISTER_LAST)
+               return -EINVAL;
+
        f = fdget(fd);
        if (!f.file)
                return -EBADF;
index e9f0d41ebb9960189930e2699669fc206b0b764f..ab4b2a1c3b7e80fba53d0c090962fd99a475aea4 100644 (file)
@@ -79,6 +79,19 @@ bool __io_alloc_req_refill(struct io_ring_ctx *ctx);
 bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
                        bool cancel_all);
 
+#define io_lockdep_assert_cq_locked(ctx)                               \
+       do {                                                            \
+               if (ctx->flags & IORING_SETUP_IOPOLL) {                 \
+                       lockdep_assert_held(&ctx->uring_lock);          \
+               } else if (!ctx->task_complete) {                       \
+                       lockdep_assert_held(&ctx->completion_lock);     \
+               } else if (ctx->submitter_task->flags & PF_EXITING) {   \
+                       lockdep_assert(current_work());                 \
+               } else {                                                \
+                       lockdep_assert(current == ctx->submitter_task); \
+               }                                                       \
+       } while (0)
+
 static inline void io_req_task_work_add(struct io_kiocb *req)
 {
        __io_req_task_work_add(req, true);
@@ -92,6 +105,8 @@ void io_cq_unlock_post(struct io_ring_ctx *ctx);
 static inline struct io_uring_cqe *io_get_cqe_overflow(struct io_ring_ctx *ctx,
                                                       bool overflow)
 {
+       io_lockdep_assert_cq_locked(ctx);
+
        if (likely(ctx->cqe_cached < ctx->cqe_sentinel)) {
                struct io_uring_cqe *cqe = ctx->cqe_cached;
 
index 2d3cd945a531b914321a7a667632adcf038563b2..15602a136821b510a8201f90daf541b0c9188e08 100644 (file)
@@ -25,6 +25,28 @@ struct io_msg {
        u32 flags;
 };
 
+static void io_double_unlock_ctx(struct io_ring_ctx *octx)
+{
+       mutex_unlock(&octx->uring_lock);
+}
+
+static int io_double_lock_ctx(struct io_ring_ctx *octx,
+                             unsigned int issue_flags)
+{
+       /*
+        * To ensure proper ordering between the two ctxs, we can only
+        * attempt a trylock on the target. If that fails and we already have
+        * the source ctx lock, punt to io-wq.
+        */
+       if (!(issue_flags & IO_URING_F_UNLOCKED)) {
+               if (!mutex_trylock(&octx->uring_lock))
+                       return -EAGAIN;
+               return 0;
+       }
+       mutex_lock(&octx->uring_lock);
+       return 0;
+}
+
 void io_msg_ring_cleanup(struct io_kiocb *req)
 {
        struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
@@ -36,6 +58,29 @@ void io_msg_ring_cleanup(struct io_kiocb *req)
        msg->src_file = NULL;
 }
 
+static inline bool io_msg_need_remote(struct io_ring_ctx *target_ctx)
+{
+       if (!target_ctx->task_complete)
+               return false;
+       return current != target_ctx->submitter_task;
+}
+
+static int io_msg_exec_remote(struct io_kiocb *req, task_work_func_t func)
+{
+       struct io_ring_ctx *ctx = req->file->private_data;
+       struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
+       struct task_struct *task = READ_ONCE(ctx->submitter_task);
+
+       if (unlikely(!task))
+               return -EOWNERDEAD;
+
+       init_task_work(&msg->tw, func);
+       if (task_work_add(ctx->submitter_task, &msg->tw, TWA_SIGNAL))
+               return -EOWNERDEAD;
+
+       return IOU_ISSUE_SKIP_COMPLETE;
+}
+
 static void io_msg_tw_complete(struct callback_head *head)
 {
        struct io_msg *msg = container_of(head, struct io_msg, tw);
@@ -43,61 +88,54 @@ static void io_msg_tw_complete(struct callback_head *head)
        struct io_ring_ctx *target_ctx = req->file->private_data;
        int ret = 0;
 
-       if (current->flags & PF_EXITING)
+       if (current->flags & PF_EXITING) {
                ret = -EOWNERDEAD;
-       else if (!io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0))
-               ret = -EOVERFLOW;
+       } else {
+               /*
+                * If the target ring is using IOPOLL mode, then we need to be
+                * holding the uring_lock for posting completions. Other ring
+                * types rely on the regular completion locking, which is
+                * handled while posting.
+                */
+               if (target_ctx->flags & IORING_SETUP_IOPOLL)
+                       mutex_lock(&target_ctx->uring_lock);
+               if (!io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0))
+                       ret = -EOVERFLOW;
+               if (target_ctx->flags & IORING_SETUP_IOPOLL)
+                       mutex_unlock(&target_ctx->uring_lock);
+       }
 
        if (ret < 0)
                req_set_fail(req);
        io_req_queue_tw_complete(req, ret);
 }
 
-static int io_msg_ring_data(struct io_kiocb *req)
+static int io_msg_ring_data(struct io_kiocb *req, unsigned int issue_flags)
 {
        struct io_ring_ctx *target_ctx = req->file->private_data;
        struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
+       int ret;
 
        if (msg->src_fd || msg->dst_fd || msg->flags)
                return -EINVAL;
+       if (target_ctx->flags & IORING_SETUP_R_DISABLED)
+               return -EBADFD;
 
-       if (target_ctx->task_complete && current != target_ctx->submitter_task) {
-               init_task_work(&msg->tw, io_msg_tw_complete);
-               if (task_work_add(target_ctx->submitter_task, &msg->tw,
-                                 TWA_SIGNAL_NO_IPI))
-                       return -EOWNERDEAD;
-
-               atomic_or(IORING_SQ_TASKRUN, &target_ctx->rings->sq_flags);
-               return IOU_ISSUE_SKIP_COMPLETE;
-       }
-
-       if (io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0))
-               return 0;
+       if (io_msg_need_remote(target_ctx))
+               return io_msg_exec_remote(req, io_msg_tw_complete);
 
-       return -EOVERFLOW;
-}
-
-static void io_double_unlock_ctx(struct io_ring_ctx *octx,
-                                unsigned int issue_flags)
-{
-       mutex_unlock(&octx->uring_lock);
-}
-
-static int io_double_lock_ctx(struct io_ring_ctx *octx,
-                             unsigned int issue_flags)
-{
-       /*
-        * To ensure proper ordering between the two ctxs, we can only
-        * attempt a trylock on the target. If that fails and we already have
-        * the source ctx lock, punt to io-wq.
-        */
-       if (!(issue_flags & IO_URING_F_UNLOCKED)) {
-               if (!mutex_trylock(&octx->uring_lock))
+       ret = -EOVERFLOW;
+       if (target_ctx->flags & IORING_SETUP_IOPOLL) {
+               if (unlikely(io_double_lock_ctx(target_ctx, issue_flags)))
                        return -EAGAIN;
-               return 0;
+               if (io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0))
+                       ret = 0;
+               io_double_unlock_ctx(target_ctx);
+       } else {
+               if (io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0))
+                       ret = 0;
        }
-       mutex_lock(&octx->uring_lock);
-       return 0;
+       return ret;
 }
 
 static struct file *io_msg_grab_file(struct io_kiocb *req, unsigned int issue_flags)
@@ -148,7 +186,7 @@ static int io_msg_install_complete(struct io_kiocb *req, unsigned int issue_flag
        if (!io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0))
                ret = -EOVERFLOW;
 out_unlock:
-       io_double_unlock_ctx(target_ctx, issue_flags);
+       io_double_unlock_ctx(target_ctx);
        return ret;
 }
 
@@ -174,6 +212,8 @@ static int io_msg_send_fd(struct io_kiocb *req, unsigned int issue_flags)
 
        if (target_ctx == ctx)
                return -EINVAL;
+       if (target_ctx->flags & IORING_SETUP_R_DISABLED)
+               return -EBADFD;
        if (!src_file) {
                src_file = io_msg_grab_file(req, issue_flags);
                if (!src_file)
@@ -182,14 +222,8 @@ static int io_msg_send_fd(struct io_kiocb *req, unsigned int issue_flags)
                req->flags |= REQ_F_NEED_CLEANUP;
        }
 
-       if (target_ctx->task_complete && current != target_ctx->submitter_task) {
-               init_task_work(&msg->tw, io_msg_tw_fd_complete);
-               if (task_work_add(target_ctx->submitter_task, &msg->tw,
-                                 TWA_SIGNAL))
-                       return -EOWNERDEAD;
-
-               return IOU_ISSUE_SKIP_COMPLETE;
-       }
+       if (io_msg_need_remote(target_ctx))
+               return io_msg_exec_remote(req, io_msg_tw_fd_complete);
        return io_msg_install_complete(req, issue_flags);
 }
 
@@ -224,7 +258,7 @@ int io_msg_ring(struct io_kiocb *req, unsigned int issue_flags)
 
        switch (msg->cmd) {
        case IORING_MSG_DATA:
-               ret = io_msg_ring_data(req);
+               ret = io_msg_ring_data(req, issue_flags);
                break;
        case IORING_MSG_SEND_FD:
                ret = io_msg_send_fd(req, issue_flags);
index ee7da6150ec41adc6e79019e5fc438e3e33c362c..2ac1366adbd77e822db141c9aed0b31198a5851d 100644 (file)
@@ -223,21 +223,22 @@ enum {
        IOU_POLL_DONE = 0,
        IOU_POLL_NO_ACTION = 1,
        IOU_POLL_REMOVE_POLL_USE_RES = 2,
+       IOU_POLL_REISSUE = 3,
 };
 
 /*
  * All poll tw should go through this. Checks for poll events, manages
  * references, does rewait, etc.
  *
- * Returns a negative error on failure. IOU_POLL_NO_ACTION when no action require,
- * which is either spurious wakeup or multishot CQE is served.
- * IOU_POLL_DONE when it's done with the request, then the mask is stored in req->cqe.res.
- * IOU_POLL_REMOVE_POLL_USE_RES indicates to remove multishot poll and that the result
- * is stored in req->cqe.
+ * Returns a negative error on failure. IOU_POLL_NO_ACTION when no action
+ * require, which is either spurious wakeup or multishot CQE is served.
+ * IOU_POLL_DONE when it's done with the request, then the mask is stored in
+ * req->cqe.res. IOU_POLL_REMOVE_POLL_USE_RES indicates to remove multishot
+ * poll and that the result is stored in req->cqe.
  */
 static int io_poll_check_events(struct io_kiocb *req, bool *locked)
 {
-       int v, ret;
+       int v;
 
        /* req->task == current here, checking PF_EXITING is safe */
        if (unlikely(req->task->flags & PF_EXITING))
@@ -276,10 +277,19 @@ static int io_poll_check_events(struct io_kiocb *req, bool *locked)
                if (!req->cqe.res) {
                        struct poll_table_struct pt = { ._key = req->apoll_events };
                        req->cqe.res = vfs_poll(req->file, &pt) & req->apoll_events;
+                       /*
+                        * We got woken with a mask, but someone else got to
+                        * it first. The above vfs_poll() doesn't add us back
+                        * to the waitqueue, so if we get nothing back, we
+                        * should be safe and attempt a reissue.
+                        */
+                       if (unlikely(!req->cqe.res)) {
+                               /* Multishot armed need not reissue */
+                               if (!(req->apoll_events & EPOLLONESHOT))
+                                       continue;
+                               return IOU_POLL_REISSUE;
+                       }
                }
-
-               if ((unlikely(!req->cqe.res)))
-                       continue;
                if (req->apoll_events & EPOLLONESHOT)
                        return IOU_POLL_DONE;
 
@@ -294,7 +304,7 @@ static int io_poll_check_events(struct io_kiocb *req, bool *locked)
                                return IOU_POLL_REMOVE_POLL_USE_RES;
                        }
                } else {
-                       ret = io_poll_issue(req, locked);
+                       int ret = io_poll_issue(req, locked);
                        if (ret == IOU_STOP_MULTISHOT)
                                return IOU_POLL_REMOVE_POLL_USE_RES;
                        if (ret < 0)
@@ -330,6 +340,9 @@ static void io_poll_task_func(struct io_kiocb *req, bool *locked)
 
                        poll = io_kiocb_to_cmd(req, struct io_poll);
                        req->cqe.res = mangle_poll(req->cqe.res & poll->events);
+               } else if (ret == IOU_POLL_REISSUE) {
+                       io_req_task_submit(req, locked);
+                       return;
                } else if (ret != IOU_POLL_REMOVE_POLL_USE_RES) {
                        req->cqe.res = ret;
                        req_set_fail(req);
@@ -342,7 +355,7 @@ static void io_poll_task_func(struct io_kiocb *req, bool *locked)
 
                if (ret == IOU_POLL_REMOVE_POLL_USE_RES)
                        io_req_task_complete(req, locked);
-               else if (ret == IOU_POLL_DONE)
+               else if (ret == IOU_POLL_DONE || ret == IOU_POLL_REISSUE)
                        io_req_task_submit(req, locked);
                else
                        io_req_defer_failed(req, ret);
@@ -533,6 +546,14 @@ static bool io_poll_can_finish_inline(struct io_kiocb *req,
        return pt->owning || io_poll_get_ownership(req);
 }
 
+static void io_poll_add_hash(struct io_kiocb *req)
+{
+       if (req->flags & REQ_F_HASH_LOCKED)
+               io_poll_req_insert_locked(req);
+       else
+               io_poll_req_insert(req);
+}
+
 /*
  * Returns 0 when it's handed over for polling. The caller owns the requests if
  * it returns non-zero, but otherwise should not touch it. Negative values
@@ -591,18 +612,17 @@ static int __io_arm_poll_handler(struct io_kiocb *req,
 
        if (mask &&
           ((poll->events & (EPOLLET|EPOLLONESHOT)) == (EPOLLET|EPOLLONESHOT))) {
-               if (!io_poll_can_finish_inline(req, ipt))
+               if (!io_poll_can_finish_inline(req, ipt)) {
+                       io_poll_add_hash(req);
                        return 0;
+               }
                io_poll_remove_entries(req);
                ipt->result_mask = mask;
                /* no one else has access to the req, forget about the ref */
                return 1;
        }
 
-       if (req->flags & REQ_F_HASH_LOCKED)
-               io_poll_req_insert_locked(req);
-       else
-               io_poll_req_insert(req);
+       io_poll_add_hash(req);
 
        if (mask && (poll->events & EPOLLET) &&
            io_poll_can_finish_inline(req, ipt)) {
index 8227af2e1c0f5e0add7d364a8d5be13f62aa23dd..9c3ddd46a1adc5abc4f10af6459381a24c4490a7 100644 (file)
@@ -1062,7 +1062,11 @@ int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
                        continue;
 
                req->cqe.flags = io_put_kbuf(req, 0);
-               io_fill_cqe_req(req->ctx, req);
+               if (unlikely(!__io_fill_cqe_req(ctx, req))) {
+                       spin_lock(&ctx->completion_lock);
+                       io_req_cqe_overflow(req);
+                       spin_unlock(&ctx->completion_lock);
+               }
        }
 
        if (unlikely(!nr_events))
index 9ea42a45da47037bdc198b7ac2be2adc67e01f49..a4a41ee3e80b5622d3f5292fb44fdee50d2fedc5 100644 (file)
@@ -351,8 +351,10 @@ BTF_ID(func, bpf_lsm_bpf_prog_alloc_security)
 BTF_ID(func, bpf_lsm_bpf_prog_free_security)
 BTF_ID(func, bpf_lsm_file_alloc_security)
 BTF_ID(func, bpf_lsm_file_free_security)
+#ifdef CONFIG_SECURITY_NETWORK
 BTF_ID(func, bpf_lsm_sk_alloc_security)
 BTF_ID(func, bpf_lsm_sk_free_security)
+#endif /* CONFIG_SECURITY_NETWORK */
 BTF_ID(func, bpf_lsm_task_free)
 BTF_SET_END(untrusted_lsm_hooks)
 
index 5aa2b5525f7937b06bcd389e3c3c215b4865a455..66bded14437735174de70195ffd2f81481f626d7 100644 (file)
@@ -152,7 +152,7 @@ static inline int htab_lock_bucket(const struct bpf_htab *htab,
 {
        unsigned long flags;
 
-       hash = hash & HASHTAB_MAP_LOCK_MASK;
+       hash = hash & min_t(u32, HASHTAB_MAP_LOCK_MASK, htab->n_buckets - 1);
 
        preempt_disable();
        if (unlikely(__this_cpu_inc_return(*(htab->map_locked[hash])) != 1)) {
@@ -171,7 +171,7 @@ static inline void htab_unlock_bucket(const struct bpf_htab *htab,
                                      struct bucket *b, u32 hash,
                                      unsigned long flags)
 {
-       hash = hash & HASHTAB_MAP_LOCK_MASK;
+       hash = hash & min_t(u32, HASHTAB_MAP_LOCK_MASK, htab->n_buckets - 1);
        raw_spin_unlock_irqrestore(&b->raw_lock, flags);
        __this_cpu_dec(*(htab->map_locked[hash]));
        preempt_enable();
index 13e4efc971e6d2d84fe62d9196a1f60b9d4b0f9d..190d9f9dc9870fcf077728be238c95e0c69880ad 100644 (file)
@@ -216,9 +216,6 @@ static void __bpf_prog_offload_destroy(struct bpf_prog *prog)
        if (offload->dev_state)
                offload->offdev->ops->destroy(prog);
 
-       /* Make sure BPF_PROG_GET_NEXT_ID can't find this dead program */
-       bpf_prog_free_id(prog, true);
-
        list_del_init(&offload->offloads);
        kfree(offload);
        prog->aux->offload = NULL;
index 64131f88c5537076a25bcfb13a04d81ea9a19ce4..ecca9366c7a6f5385e139c83015bea9f9d11aaed 100644 (file)
@@ -1972,7 +1972,7 @@ static void bpf_audit_prog(const struct bpf_prog *prog, unsigned int op)
                return;
        if (audit_enabled == AUDIT_OFF)
                return;
-       if (op == BPF_AUDIT_LOAD)
+       if (!in_irq() && !irqs_disabled())
                ctx = audit_context();
        ab = audit_log_start(ctx, GFP_ATOMIC, AUDIT_BPF);
        if (unlikely(!ab))
@@ -2001,7 +2001,7 @@ static int bpf_prog_alloc_id(struct bpf_prog *prog)
        return id > 0 ? 0 : id;
 }
 
-void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock)
+void bpf_prog_free_id(struct bpf_prog *prog)
 {
        unsigned long flags;
 
@@ -2013,18 +2013,10 @@ void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock)
        if (!prog->aux->id)
                return;
 
-       if (do_idr_lock)
-               spin_lock_irqsave(&prog_idr_lock, flags);
-       else
-               __acquire(&prog_idr_lock);
-
+       spin_lock_irqsave(&prog_idr_lock, flags);
        idr_remove(&prog_idr, prog->aux->id);
        prog->aux->id = 0;
-
-       if (do_idr_lock)
-               spin_unlock_irqrestore(&prog_idr_lock, flags);
-       else
-               __release(&prog_idr_lock);
+       spin_unlock_irqrestore(&prog_idr_lock, flags);
 }
 
 static void __bpf_prog_put_rcu(struct rcu_head *rcu)
@@ -2067,17 +2059,15 @@ static void bpf_prog_put_deferred(struct work_struct *work)
        prog = aux->prog;
        perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_UNLOAD, 0);
        bpf_audit_prog(prog, BPF_AUDIT_UNLOAD);
+       bpf_prog_free_id(prog);
        __bpf_prog_put_noref(prog, true);
 }
 
-static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock)
+static void __bpf_prog_put(struct bpf_prog *prog)
 {
        struct bpf_prog_aux *aux = prog->aux;
 
        if (atomic64_dec_and_test(&aux->refcnt)) {
-               /* bpf_prog_free_id() must be called first */
-               bpf_prog_free_id(prog, do_idr_lock);
-
                if (in_irq() || irqs_disabled()) {
                        INIT_WORK(&aux->work, bpf_prog_put_deferred);
                        schedule_work(&aux->work);
@@ -2089,7 +2079,7 @@ static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock)
 
 void bpf_prog_put(struct bpf_prog *prog)
 {
-       __bpf_prog_put(prog, true);
+       __bpf_prog_put(prog);
 }
 EXPORT_SYMBOL_GPL(bpf_prog_put);
 
index c2a2182ce570219bad9cc5362e15435d4873b6ba..c4ab9d6cdbe9c81fd13c9cbabc99790b656ea125 100644 (file)
@@ -438,6 +438,7 @@ struct bpf_iter_seq_task_vma_info {
         */
        struct bpf_iter_seq_task_common common;
        struct task_struct *task;
+       struct mm_struct *mm;
        struct vm_area_struct *vma;
        u32 tid;
        unsigned long prev_vm_start;
@@ -456,16 +457,19 @@ task_vma_seq_get_next(struct bpf_iter_seq_task_vma_info *info)
        enum bpf_task_vma_iter_find_op op;
        struct vm_area_struct *curr_vma;
        struct task_struct *curr_task;
+       struct mm_struct *curr_mm;
        u32 saved_tid = info->tid;
 
        /* If this function returns a non-NULL vma, it holds a reference to
-        * the task_struct, and holds read lock on vma->mm->mmap_lock.
+        * the task_struct, holds a refcount on mm->mm_users, and holds
+        * read lock on vma->mm->mmap_lock.
         * If this function returns NULL, it does not hold any reference or
         * lock.
         */
        if (info->task) {
                curr_task = info->task;
                curr_vma = info->vma;
+               curr_mm = info->mm;
                /* In case of lock contention, drop mmap_lock to unblock
                 * the writer.
                 *
@@ -504,13 +508,15 @@ task_vma_seq_get_next(struct bpf_iter_seq_task_vma_info *info)
                 *    4.2) VMA2 and VMA2' covers different ranges, process
                 *         VMA2'.
                 */
-               if (mmap_lock_is_contended(curr_task->mm)) {
+               if (mmap_lock_is_contended(curr_mm)) {
                        info->prev_vm_start = curr_vma->vm_start;
                        info->prev_vm_end = curr_vma->vm_end;
                        op = task_vma_iter_find_vma;
-                       mmap_read_unlock(curr_task->mm);
-                       if (mmap_read_lock_killable(curr_task->mm))
+                       mmap_read_unlock(curr_mm);
+                       if (mmap_read_lock_killable(curr_mm)) {
+                               mmput(curr_mm);
                                goto finish;
+                       }
                } else {
                        op = task_vma_iter_next_vma;
                }
@@ -535,42 +541,47 @@ again:
                        op = task_vma_iter_find_vma;
                }
 
-               if (!curr_task->mm)
+               curr_mm = get_task_mm(curr_task);
+               if (!curr_mm)
                        goto next_task;
 
-               if (mmap_read_lock_killable(curr_task->mm))
+               if (mmap_read_lock_killable(curr_mm)) {
+                       mmput(curr_mm);
                        goto finish;
+               }
        }
 
        switch (op) {
        case task_vma_iter_first_vma:
-               curr_vma = find_vma(curr_task->mm, 0);
+               curr_vma = find_vma(curr_mm, 0);
                break;
        case task_vma_iter_next_vma:
-               curr_vma = find_vma(curr_task->mm, curr_vma->vm_end);
+               curr_vma = find_vma(curr_mm, curr_vma->vm_end);
                break;
        case task_vma_iter_find_vma:
                /* We dropped mmap_lock so it is necessary to use find_vma
                 * to find the next vma. This is similar to the  mechanism
                 * in show_smaps_rollup().
                 */
-               curr_vma = find_vma(curr_task->mm, info->prev_vm_end - 1);
+               curr_vma = find_vma(curr_mm, info->prev_vm_end - 1);
                /* case 1) and 4.2) above just use curr_vma */
 
                /* check for case 2) or case 4.1) above */
                if (curr_vma &&
                    curr_vma->vm_start == info->prev_vm_start &&
                    curr_vma->vm_end == info->prev_vm_end)
-                       curr_vma = find_vma(curr_task->mm, curr_vma->vm_end);
+                       curr_vma = find_vma(curr_mm, curr_vma->vm_end);
                break;
        }
        if (!curr_vma) {
                /* case 3) above, or case 2) 4.1) with vma->next == NULL */
-               mmap_read_unlock(curr_task->mm);
+               mmap_read_unlock(curr_mm);
+               mmput(curr_mm);
                goto next_task;
        }
        info->task = curr_task;
        info->vma = curr_vma;
+       info->mm = curr_mm;
        return curr_vma;
 
 next_task:
@@ -579,6 +590,7 @@ next_task:
 
        put_task_struct(curr_task);
        info->task = NULL;
+       info->mm = NULL;
        info->tid++;
        goto again;
 
@@ -587,6 +599,7 @@ finish:
                put_task_struct(curr_task);
        info->task = NULL;
        info->vma = NULL;
+       info->mm = NULL;
        return NULL;
 }
 
@@ -658,7 +671,9 @@ static void task_vma_seq_stop(struct seq_file *seq, void *v)
                 */
                info->prev_vm_start = ~0UL;
                info->prev_vm_end = info->vma->vm_end;
-               mmap_read_unlock(info->task->mm);
+               mmap_read_unlock(info->mm);
+               mmput(info->mm);
+               info->mm = NULL;
                put_task_struct(info->task);
                info->task = NULL;
        }
index 11f5ec0b80165a8fce0eba694decfd4651703b90..d0ed7d6f5eec508da03be58ec2c830b075895d2e 100644 (file)
@@ -488,6 +488,10 @@ again:
                /* reset fops->func and fops->trampoline for re-register */
                tr->fops->func = NULL;
                tr->fops->trampoline = 0;
+
+               /* reset im->image memory attr for arch_prepare_bpf_trampoline */
+               set_memory_nx((long)im->image, 1);
+               set_memory_rw((long)im->image, 1);
                goto again;
        }
 #endif
index a5255a0dcbb681f1e3681d1b7eac9694627095c1..dbef0b0967ae16291ad84bfb9ef0d16f3ca3c831 100644 (file)
@@ -1054,6 +1054,8 @@ static void print_insn_state(struct bpf_verifier_env *env,
  */
 static void *copy_array(void *dst, const void *src, size_t n, size_t size, gfp_t flags)
 {
+       size_t alloc_bytes;
+       void *orig = dst;
        size_t bytes;
 
        if (ZERO_OR_NULL_PTR(src))
@@ -1062,11 +1064,11 @@ static void *copy_array(void *dst, const void *src, size_t n, size_t size, gfp_t
        if (unlikely(check_mul_overflow(n, size, &bytes)))
                return NULL;
 
-       if (ksize(dst) < ksize(src)) {
-               kfree(dst);
-               dst = kmalloc_track_caller(kmalloc_size_roundup(bytes), flags);
-               if (!dst)
-                       return NULL;
+       alloc_bytes = max(ksize(orig), kmalloc_size_roundup(bytes));
+       dst = krealloc(orig, alloc_bytes, flags);
+       if (!dst) {
+               kfree(orig);
+               return NULL;
        }
 
        memcpy(dst, src, bytes);
@@ -2746,6 +2748,12 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx,
                         */
                        if (insn->src_reg == 0 && is_callback_calling_function(insn->imm))
                                return -ENOTSUPP;
+                       /* kfunc with imm==0 is invalid and fixup_kfunc_call will
+                        * catch this error later. Make backtracking conservative
+                        * with ENOTSUPP.
+                        */
+                       if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL && insn->imm == 0)
+                               return -ENOTSUPP;
                        /* regular helper call sets R0 */
                        *reg_mask &= ~1;
                        if (*reg_mask & 0x3f) {
@@ -3287,7 +3295,9 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
                bool sanitize = reg && is_spillable_regtype(reg->type);
 
                for (i = 0; i < size; i++) {
-                       if (state->stack[spi].slot_type[i] == STACK_INVALID) {
+                       u8 type = state->stack[spi].slot_type[i];
+
+                       if (type != STACK_MISC && type != STACK_ZERO) {
                                sanitize = true;
                                break;
                        }
@@ -11822,10 +11832,17 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
         *      register B - not null
         * for JNE A, B, ... - A is not null in the false branch;
         * for JEQ A, B, ... - A is not null in the true branch.
+        *
+        * Since PTR_TO_BTF_ID points to a kernel struct that does
+        * not need to be null checked by the BPF program, i.e.,
+        * could be null even without PTR_MAYBE_NULL marking, so
+        * only propagate nullness when neither reg is that type.
         */
        if (!is_jmp32 && BPF_SRC(insn->code) == BPF_X &&
            __is_pointer_value(false, src_reg) && __is_pointer_value(false, dst_reg) &&
-           type_may_be_null(src_reg->type) != type_may_be_null(dst_reg->type)) {
+           type_may_be_null(src_reg->type) != type_may_be_null(dst_reg->type) &&
+           base_type(src_reg->type) != PTR_TO_BTF_ID &&
+           base_type(dst_reg->type) != PTR_TO_BTF_ID) {
                eq_branch_regs = NULL;
                switch (opcode) {
                case BPF_JEQ:
index eacc3702654d5c936915bf5a126ce365023b94c3..d56328e5080e9814e2d0b3f045a3d8d2a7f51c3f 100644 (file)
@@ -380,7 +380,6 @@ enum event_type_t {
 
 /*
  * perf_sched_events : >0 events exist
- * perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu
  */
 
 static void perf_sched_delayed(struct work_struct *work);
@@ -389,7 +388,6 @@ static DECLARE_DELAYED_WORK(perf_sched_work, perf_sched_delayed);
 static DEFINE_MUTEX(perf_sched_mutex);
 static atomic_t perf_sched_count;
 
-static DEFINE_PER_CPU(atomic_t, perf_cgroup_events);
 static DEFINE_PER_CPU(struct pmu_event_list, pmu_sb_events);
 
 static atomic_t nr_mmap_events __read_mostly;
@@ -844,9 +842,16 @@ static void perf_cgroup_switch(struct task_struct *task)
        struct perf_cpu_context *cpuctx = this_cpu_ptr(&perf_cpu_context);
        struct perf_cgroup *cgrp;
 
-       cgrp = perf_cgroup_from_task(task, NULL);
+       /*
+        * cpuctx->cgrp is set when the first cgroup event enabled,
+        * and is cleared when the last cgroup event disabled.
+        */
+       if (READ_ONCE(cpuctx->cgrp) == NULL)
+               return;
 
        WARN_ON_ONCE(cpuctx->ctx.nr_cgroups == 0);
+
+       cgrp = perf_cgroup_from_task(task, NULL);
        if (READ_ONCE(cpuctx->cgrp) == cgrp)
                return;
 
@@ -3631,8 +3636,7 @@ void __perf_event_task_sched_out(struct task_struct *task,
         * to check if we have to switch out PMU state.
         * cgroup event are system-wide mode only
         */
-       if (atomic_read(this_cpu_ptr(&perf_cgroup_events)))
-               perf_cgroup_switch(next);
+       perf_cgroup_switch(next);
 }
 
 static bool perf_less_group_idx(const void *l, const void *r)
@@ -4974,15 +4978,6 @@ static void unaccount_pmu_sb_event(struct perf_event *event)
                detach_sb_event(event);
 }
 
-static void unaccount_event_cpu(struct perf_event *event, int cpu)
-{
-       if (event->parent)
-               return;
-
-       if (is_cgroup_event(event))
-               atomic_dec(&per_cpu(perf_cgroup_events, cpu));
-}
-
 #ifdef CONFIG_NO_HZ_FULL
 static DEFINE_SPINLOCK(nr_freq_lock);
 #endif
@@ -5048,8 +5043,6 @@ static void unaccount_event(struct perf_event *event)
                        schedule_delayed_work(&perf_sched_work, HZ);
        }
 
-       unaccount_event_cpu(event, event->cpu);
-
        unaccount_pmu_sb_event(event);
 }
 
@@ -11679,15 +11672,6 @@ static void account_pmu_sb_event(struct perf_event *event)
                attach_sb_event(event);
 }
 
-static void account_event_cpu(struct perf_event *event, int cpu)
-{
-       if (event->parent)
-               return;
-
-       if (is_cgroup_event(event))
-               atomic_inc(&per_cpu(perf_cgroup_events, cpu));
-}
-
 /* Freq events need the tick to stay alive (see perf_event_task_tick). */
 static void account_freq_event_nohz(void)
 {
@@ -11775,8 +11759,6 @@ static void account_event(struct perf_event *event)
        }
 enabled:
 
-       account_event_cpu(event, event->cpu);
-
        account_pmu_sb_event(event);
 }
 
@@ -12339,12 +12321,12 @@ SYSCALL_DEFINE5(perf_event_open,
        if (flags & ~PERF_FLAG_ALL)
                return -EINVAL;
 
-       /* Do we allow access to perf_event_open(2) ? */
-       err = security_perf_event_open(&attr, PERF_SECURITY_OPEN);
+       err = perf_copy_attr(attr_uptr, &attr);
        if (err)
                return err;
 
-       err = perf_copy_attr(attr_uptr, &attr);
+       /* Do we allow access to perf_event_open(2) ? */
+       err = security_perf_event_open(&attr, PERF_SECURITY_OPEN);
        if (err)
                return err;
 
@@ -12689,7 +12671,8 @@ SYSCALL_DEFINE5(perf_event_open,
        return event_fd;
 
 err_context:
-       /* event->pmu_ctx freed by free_event() */
+       put_pmu_ctx(event->pmu_ctx);
+       event->pmu_ctx = NULL; /* _free_event() */
 err_locked:
        mutex_unlock(&ctx->mutex);
        perf_unpin_context(ctx);
@@ -12802,6 +12785,7 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
 
 err_pmu_ctx:
        put_pmu_ctx(pmu_ctx);
+       event->pmu_ctx = NULL; /* _free_event() */
 err_unlock:
        mutex_unlock(&ctx->mutex);
        perf_unpin_context(ctx);
@@ -12822,13 +12806,11 @@ static void __perf_pmu_remove(struct perf_event_context *ctx,
 
        perf_event_groups_for_cpu_pmu(event, groups, cpu, pmu) {
                perf_remove_from_context(event, 0);
-               unaccount_event_cpu(event, cpu);
                put_pmu_ctx(event->pmu_ctx);
                list_add(&event->migrate_entry, events);
 
                for_each_sibling_event(sibling, event) {
                        perf_remove_from_context(sibling, 0);
-                       unaccount_event_cpu(sibling, cpu);
                        put_pmu_ctx(sibling->pmu_ctx);
                        list_add(&sibling->migrate_entry, events);
                }
@@ -12847,7 +12829,6 @@ static void __perf_pmu_install_event(struct pmu *pmu,
 
        if (event->state >= PERF_EVENT_STATE_OFF)
                event->state = PERF_EVENT_STATE_INACTIVE;
-       account_event_cpu(event, cpu);
        perf_install_in_context(ctx, event, cpu);
 }
 
@@ -13231,7 +13212,7 @@ inherit_event(struct perf_event *parent_event,
        pmu_ctx = find_get_pmu_context(child_event->pmu, child_ctx, child_event);
        if (IS_ERR(pmu_ctx)) {
                free_event(child_event);
-               return NULL;
+               return ERR_CAST(pmu_ctx);
        }
        child_event->pmu_ctx = pmu_ctx;
 
@@ -13742,8 +13723,7 @@ static int __perf_cgroup_move(void *info)
        struct task_struct *task = info;
 
        preempt_disable();
-       if (atomic_read(this_cpu_ptr(&perf_cgroup_events)))
-               perf_cgroup_switch(task);
+       perf_cgroup_switch(task);
        preempt_enable();
 
        return 0;
index 086a22d1adb78f4a53760bc8e975c55a5cd6a186..a8074079b09e87ca867c2cfc84be021076198a65 100644 (file)
@@ -286,19 +286,22 @@ SYSCALL_DEFINE5(futex_waitv, struct futex_waitv __user *, waiters,
        }
 
        futexv = kcalloc(nr_futexes, sizeof(*futexv), GFP_KERNEL);
-       if (!futexv)
-               return -ENOMEM;
+       if (!futexv) {
+               ret = -ENOMEM;
+               goto destroy_timer;
+       }
 
        ret = futex_parse_waitv(futexv, waiters, nr_futexes);
        if (!ret)
                ret = futex_wait_multiple(futexv, nr_futexes, timeout ? &to : NULL);
 
+       kfree(futexv);
+
+destroy_timer:
        if (timeout) {
                hrtimer_cancel(&to.timer);
                destroy_hrtimer_on_stack(&to.timer);
        }
-
-       kfree(futexv);
        return ret;
 }
 
index 473036b43c832b2a94c8eef8edb9ef82c11b9a6c..81b97f0f65564e9dafc66c89d8627c8429939306 100755 (executable)
@@ -14,6 +14,8 @@ include/
 arch/$SRCARCH/include/
 "
 
+type cpio > /dev/null
+
 # Support incremental builds by skipping archive generation
 # if timestamps of files being archived are not changed.
 
index f35d9cc1aab15447961b347da45338d828abe655..bfbc12da33267f827112009da05bf44ae86e6ba4 100644 (file)
@@ -157,14 +157,11 @@ static void test_kallsyms_compression_ratio(void)
 static int lookup_name(void *data, const char *name, struct module *mod, unsigned long addr)
 {
        u64 t0, t1, t;
-       unsigned long flags;
        struct test_stat *stat = (struct test_stat *)data;
 
-       local_irq_save(flags);
-       t0 = sched_clock();
+       t0 = ktime_get_ns();
        (void)kallsyms_lookup_name(name);
-       t1 = sched_clock();
-       local_irq_restore(flags);
+       t1 = ktime_get_ns();
 
        t = t1 - t0;
        if (t < stat->min)
@@ -234,18 +231,15 @@ static int find_symbol(void *data, const char *name, struct module *mod, unsigne
 static void test_perf_kallsyms_on_each_symbol(void)
 {
        u64 t0, t1;
-       unsigned long flags;
        struct test_stat stat;
 
        memset(&stat, 0, sizeof(stat));
        stat.max = INT_MAX;
        stat.name = stub_name;
        stat.perf = 1;
-       local_irq_save(flags);
-       t0 = sched_clock();
+       t0 = ktime_get_ns();
        kallsyms_on_each_symbol(find_symbol, &stat);
-       t1 = sched_clock();
-       local_irq_restore(flags);
+       t1 = ktime_get_ns();
        pr_info("kallsyms_on_each_symbol() traverse all: %lld ns\n", t1 - t0);
 }
 
@@ -270,17 +264,14 @@ static int match_symbol(void *data, unsigned long addr)
 static void test_perf_kallsyms_on_each_match_symbol(void)
 {
        u64 t0, t1;
-       unsigned long flags;
        struct test_stat stat;
 
        memset(&stat, 0, sizeof(stat));
        stat.max = INT_MAX;
        stat.name = stub_name;
-       local_irq_save(flags);
-       t0 = sched_clock();
+       t0 = ktime_get_ns();
        kallsyms_on_each_match_symbol(match_symbol, stat.name, &stat);
-       t1 = sched_clock();
-       local_irq_restore(flags);
+       t1 = ktime_get_ns();
        pr_info("kallsyms_on_each_match_symbol() traverse all: %lld ns\n", t1 - t0);
 }
 
index dcec1b743c69419ddd7d517757c8f061bd8f3da3..a60c561724be9601986261c63f97e83b655161a7 100644 (file)
@@ -159,7 +159,7 @@ static bool __report_matches(const struct expect_report *r)
        const bool is_assert = (r->access[0].type | r->access[1].type) & KCSAN_ACCESS_ASSERT;
        bool ret = false;
        unsigned long flags;
-       typeof(observed.lines) expect;
+       typeof(*observed.lines) *expect;
        const char *end;
        char *cur;
        int i;
@@ -168,6 +168,10 @@ static bool __report_matches(const struct expect_report *r)
        if (!report_available())
                return false;
 
+       expect = kmalloc(sizeof(observed.lines), GFP_KERNEL);
+       if (WARN_ON(!expect))
+               return false;
+
        /* Generate expected report contents. */
 
        /* Title */
@@ -253,6 +257,7 @@ static bool __report_matches(const struct expect_report *r)
                strstr(observed.lines[2], expect[1])));
 out:
        spin_unlock_irqrestore(&observed.lock, flags);
+       kfree(expect);
        return ret;
 }
 
index 7779ee8abc2a08b4a9830b375ea65fa2a3ca013f..010cf4e6d0b8f07808257b2c16be1ad72a20a337 100644 (file)
@@ -89,15 +89,31 @@ static inline int __ww_mutex_check_kill(struct rt_mutex *lock,
  * set this bit before looking at the lock.
  */
 
-static __always_inline void
-rt_mutex_set_owner(struct rt_mutex_base *lock, struct task_struct *owner)
+static __always_inline struct task_struct *
+rt_mutex_owner_encode(struct rt_mutex_base *lock, struct task_struct *owner)
 {
        unsigned long val = (unsigned long)owner;
 
        if (rt_mutex_has_waiters(lock))
                val |= RT_MUTEX_HAS_WAITERS;
 
-       WRITE_ONCE(lock->owner, (struct task_struct *)val);
+       return (struct task_struct *)val;
+}
+
+static __always_inline void
+rt_mutex_set_owner(struct rt_mutex_base *lock, struct task_struct *owner)
+{
+       /*
+        * lock->wait_lock is held but explicit acquire semantics are needed
+        * for a new lock owner so WRITE_ONCE is insufficient.
+        */
+       xchg_acquire(&lock->owner, rt_mutex_owner_encode(lock, owner));
+}
+
+static __always_inline void rt_mutex_clear_owner(struct rt_mutex_base *lock)
+{
+       /* lock->wait_lock is held so the unlock provides release semantics. */
+       WRITE_ONCE(lock->owner, rt_mutex_owner_encode(lock, NULL));
 }
 
 static __always_inline void clear_rt_mutex_waiters(struct rt_mutex_base *lock)
@@ -106,7 +122,8 @@ static __always_inline void clear_rt_mutex_waiters(struct rt_mutex_base *lock)
                        ((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS);
 }
 
-static __always_inline void fixup_rt_mutex_waiters(struct rt_mutex_base *lock)
+static __always_inline void
+fixup_rt_mutex_waiters(struct rt_mutex_base *lock, bool acquire_lock)
 {
        unsigned long owner, *p = (unsigned long *) &lock->owner;
 
@@ -172,8 +189,21 @@ static __always_inline void fixup_rt_mutex_waiters(struct rt_mutex_base *lock)
         * still set.
         */
        owner = READ_ONCE(*p);
-       if (owner & RT_MUTEX_HAS_WAITERS)
-               WRITE_ONCE(*p, owner & ~RT_MUTEX_HAS_WAITERS);
+       if (owner & RT_MUTEX_HAS_WAITERS) {
+               /*
+                * See rt_mutex_set_owner() and rt_mutex_clear_owner() on
+                * why xchg_acquire() is used for updating owner for
+                * locking and WRITE_ONCE() for unlocking.
+                *
+                * WRITE_ONCE() would work for the acquire case too, but
+                * in case that the lock acquisition failed it might
+                * force other lockers into the slow path unnecessarily.
+                */
+               if (acquire_lock)
+                       xchg_acquire(p, owner & ~RT_MUTEX_HAS_WAITERS);
+               else
+                       WRITE_ONCE(*p, owner & ~RT_MUTEX_HAS_WAITERS);
+       }
 }
 
 /*
@@ -208,6 +238,13 @@ static __always_inline void mark_rt_mutex_waiters(struct rt_mutex_base *lock)
                owner = *p;
        } while (cmpxchg_relaxed(p, owner,
                                 owner | RT_MUTEX_HAS_WAITERS) != owner);
+
+       /*
+        * The cmpxchg loop above is relaxed to avoid back-to-back ACQUIRE
+        * operations in the event of contention. Ensure the successful
+        * cmpxchg is visible.
+        */
+       smp_mb__after_atomic();
 }
 
 /*
@@ -1243,7 +1280,7 @@ static int __sched __rt_mutex_slowtrylock(struct rt_mutex_base *lock)
         * try_to_take_rt_mutex() sets the lock waiters bit
         * unconditionally. Clean this up.
         */
-       fixup_rt_mutex_waiters(lock);
+       fixup_rt_mutex_waiters(lock, true);
 
        return ret;
 }
@@ -1604,7 +1641,7 @@ static int __sched __rt_mutex_slowlock(struct rt_mutex_base *lock,
         * try_to_take_rt_mutex() sets the waiter bit
         * unconditionally. We might have to fix that up.
         */
-       fixup_rt_mutex_waiters(lock);
+       fixup_rt_mutex_waiters(lock, true);
 
        trace_contention_end(lock, ret);
 
@@ -1719,7 +1756,7 @@ static void __sched rtlock_slowlock_locked(struct rt_mutex_base *lock)
         * try_to_take_rt_mutex() sets the waiter bit unconditionally.
         * We might have to fix that up:
         */
-       fixup_rt_mutex_waiters(lock);
+       fixup_rt_mutex_waiters(lock, true);
        debug_rt_mutex_free_waiter(&waiter);
 
        trace_contention_end(lock, 0);
index 900220941caacc116445948d9614fe495fbc1ce9..cb9fdff76a8a32c377376ae76c7182c9c802f046 100644 (file)
@@ -267,7 +267,7 @@ void __sched rt_mutex_init_proxy_locked(struct rt_mutex_base *lock,
 void __sched rt_mutex_proxy_unlock(struct rt_mutex_base *lock)
 {
        debug_rt_mutex_proxy_unlock(lock);
-       rt_mutex_set_owner(lock, NULL);
+       rt_mutex_clear_owner(lock);
 }
 
 /**
@@ -382,7 +382,7 @@ int __sched rt_mutex_wait_proxy_lock(struct rt_mutex_base *lock,
         * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
         * have to fix that up.
         */
-       fixup_rt_mutex_waiters(lock);
+       fixup_rt_mutex_waiters(lock, true);
        raw_spin_unlock_irq(&lock->wait_lock);
 
        return ret;
@@ -438,7 +438,7 @@ bool __sched rt_mutex_cleanup_proxy_lock(struct rt_mutex_base *lock,
         * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
         * have to fix that up.
         */
-       fixup_rt_mutex_waiters(lock);
+       fixup_rt_mutex_waiters(lock, false);
 
        raw_spin_unlock_irq(&lock->wait_lock);
 
index 7decf1e9c48635c3b151661208108a70c5d63e22..a5ed2e53547c94fdcd016427111f2bc26ee8cc74 100644 (file)
@@ -123,6 +123,7 @@ bool console_srcu_read_lock_is_held(void)
 {
        return srcu_read_lock_held(&console_srcu);
 }
+EXPORT_SYMBOL(console_srcu_read_lock_is_held);
 #endif
 
 enum devkmsg_log_bits {
@@ -1891,6 +1892,7 @@ static void console_lock_spinning_enable(void)
 /**
  * console_lock_spinning_disable_and_check - mark end of code where another
  *     thread was able to busy wait and check if there is a waiter
+ * @cookie: cookie returned from console_srcu_read_lock()
  *
  * This is called at the end of the section where spinning is allowed.
  * It has two functions. First, it is a signal that it is no longer
index 25b582b6ee5f78949d7646b2c23f0c4ba593a5d6..bb1ee6d7bddea70b11a7aa136b419a153534a056 100644 (file)
@@ -2604,27 +2604,71 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
                .user_mask = NULL,
                .flags     = SCA_USER,  /* clear the user requested mask */
        };
+       union cpumask_rcuhead {
+               cpumask_t cpumask;
+               struct rcu_head rcu;
+       };
 
        __do_set_cpus_allowed(p, &ac);
-       kfree(ac.user_mask);
+
+       /*
+        * Because this is called with p->pi_lock held, it is not possible
+        * to use kfree() here (when PREEMPT_RT=y), therefore punt to using
+        * kfree_rcu().
+        */
+       kfree_rcu((union cpumask_rcuhead *)ac.user_mask, rcu);
+}
+
+static cpumask_t *alloc_user_cpus_ptr(int node)
+{
+       /*
+        * See do_set_cpus_allowed() above for the rcu_head usage.
+        */
+       int size = max_t(int, cpumask_size(), sizeof(struct rcu_head));
+
+       return kmalloc_node(size, GFP_KERNEL, node);
 }
 
 int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src,
                      int node)
 {
+       cpumask_t *user_mask;
        unsigned long flags;
 
-       if (!src->user_cpus_ptr)
+       /*
+        * Always clear dst->user_cpus_ptr first as their user_cpus_ptr's
+        * may differ by now due to racing.
+        */
+       dst->user_cpus_ptr = NULL;
+
+       /*
+        * This check is racy and losing the race is a valid situation.
+        * It is not worth the extra overhead of taking the pi_lock on
+        * every fork/clone.
+        */
+       if (data_race(!src->user_cpus_ptr))
                return 0;
 
-       dst->user_cpus_ptr = kmalloc_node(cpumask_size(), GFP_KERNEL, node);
-       if (!dst->user_cpus_ptr)
+       user_mask = alloc_user_cpus_ptr(node);
+       if (!user_mask)
                return -ENOMEM;
 
-       /* Use pi_lock to protect content of user_cpus_ptr */
+       /*
+        * Use pi_lock to protect content of user_cpus_ptr
+        *
+        * Though unlikely, user_cpus_ptr can be reset to NULL by a concurrent
+        * do_set_cpus_allowed().
+        */
        raw_spin_lock_irqsave(&src->pi_lock, flags);
-       cpumask_copy(dst->user_cpus_ptr, src->user_cpus_ptr);
+       if (src->user_cpus_ptr) {
+               swap(dst->user_cpus_ptr, user_mask);
+               cpumask_copy(dst->user_cpus_ptr, src->user_cpus_ptr);
+       }
        raw_spin_unlock_irqrestore(&src->pi_lock, flags);
+
+       if (unlikely(user_mask))
+               kfree(user_mask);
+
        return 0;
 }
 
@@ -3581,6 +3625,11 @@ static inline bool rq_has_pinned_tasks(struct rq *rq)
        return false;
 }
 
+static inline cpumask_t *alloc_user_cpus_ptr(int node)
+{
+       return NULL;
+}
+
 #endif /* !CONFIG_SMP */
 
 static void
@@ -5504,7 +5553,9 @@ void scheduler_tick(void)
        unsigned long thermal_pressure;
        u64 resched_latency;
 
-       arch_scale_freq_tick();
+       if (housekeeping_cpu(cpu, HK_TYPE_TICK))
+               arch_scale_freq_tick();
+
        sched_clock_tick();
 
        rq_lock(rq, &rf);
@@ -8239,8 +8290,8 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
        if (retval)
                goto out_put_task;
 
-       user_mask = kmalloc(cpumask_size(), GFP_KERNEL);
-       if (!user_mask) {
+       user_mask = alloc_user_cpus_ptr(NUMA_NO_NODE);
+       if (IS_ENABLED(CONFIG_SMP) && !user_mask) {
                retval = -ENOMEM;
                goto out_put_task;
        }
index 5fd54bf0e886726dfb9fb955b0099239aedc9464..88b31f096fb2d92f3758e368fd728f04b9a856b5 100644 (file)
@@ -1442,6 +1442,8 @@ static int do_prlimit(struct task_struct *tsk, unsigned int resource,
 
        if (resource >= RLIM_NLIMITS)
                return -EINVAL;
+       resource = array_index_nospec(resource, RLIM_NLIMITS);
+
        if (new_rlim) {
                if (new_rlim->rlim_cur > new_rlim->rlim_max)
                        return -EINVAL;
index 475ecceda7688cebc11b9afa8f919f70b2cdf23d..5e2c2c26b3cc9190d1685d2da0307d196188756e 100644 (file)
@@ -18,7 +18,7 @@
 #include "tick-internal.h"
 
 /**
- * tick_program_event
+ * tick_program_event - program the CPU local timer device for the next event
  */
 int tick_program_event(ktime_t expires, int force)
 {
@@ -99,7 +99,7 @@ int tick_switch_to_oneshot(void (*handler)(struct clock_event_device *))
 }
 
 /**
- * tick_check_oneshot_mode - check whether the system is in oneshot mode
+ * tick_oneshot_mode_active - check whether the system is in oneshot mode
  *
  * returns 1 when either nohz or highres are enabled. otherwise 0.
  */
index 526257b3727ca9d0400c7cceb29647d7642c41e4..f4198af60fee5de398d508fe2d155855aa6fde3b 100644 (file)
@@ -462,7 +462,7 @@ struct __kernel_old_timeval ns_to_kernel_old_timeval(s64 nsec)
 EXPORT_SYMBOL(ns_to_kernel_old_timeval);
 
 /**
- * set_normalized_timespec - set timespec sec and nsec parts and normalize
+ * set_normalized_timespec64 - set timespec sec and nsec parts and normalize
  *
  * @ts:                pointer to timespec variable to be set
  * @sec:       seconds to set
@@ -526,7 +526,7 @@ struct timespec64 ns_to_timespec64(s64 nsec)
 EXPORT_SYMBOL(ns_to_timespec64);
 
 /**
- * msecs_to_jiffies: - convert milliseconds to jiffies
+ * __msecs_to_jiffies: - convert milliseconds to jiffies
  * @m: time in milliseconds
  *
  * conversion is done as follows:
@@ -541,12 +541,12 @@ EXPORT_SYMBOL(ns_to_timespec64);
  *   handling any 32-bit overflows.
  *   for the details see __msecs_to_jiffies()
  *
- * msecs_to_jiffies() checks for the passed in value being a constant
+ * __msecs_to_jiffies() checks for the passed in value being a constant
  * via __builtin_constant_p() allowing gcc to eliminate most of the
  * code, __msecs_to_jiffies() is called if the value passed does not
  * allow constant folding and the actual conversion must be done at
  * runtime.
- * the _msecs_to_jiffies helpers are the HZ dependent conversion
+ * The _msecs_to_jiffies helpers are the HZ dependent conversion
  * routines found in include/linux/jiffies.h
  */
 unsigned long __msecs_to_jiffies(const unsigned int m)
index f72b9f1de178efd858958eb7dc1251a395f55add..5579ead449f25b630882d31bfcf9e3211297230c 100644 (file)
@@ -1590,10 +1590,10 @@ void __weak read_persistent_clock64(struct timespec64 *ts)
 /**
  * read_persistent_wall_and_boot_offset - Read persistent clock, and also offset
  *                                        from the boot.
+ * @wall_time:   current time as returned by persistent clock
+ * @boot_offset:  offset that is defined as wall_time - boot_time
  *
  * Weak dummy function for arches that do not yet support it.
- * @wall_time: - current time as returned by persistent clock
- * @boot_offset: - offset that is defined as wall_time - boot_time
  *
  * The default function calculates offset based on the current value of
  * local_clock(). This way architectures that support sched_clock() but don't
@@ -1701,7 +1701,7 @@ static void __timekeeping_inject_sleeptime(struct timekeeper *tk,
 }
 
 #if defined(CONFIG_PM_SLEEP) && defined(CONFIG_RTC_HCTOSYS_DEVICE)
-/**
+/*
  * We have three kinds of time sources to use for sleep time
  * injection, the preference order is:
  * 1) non-stop clocksource
@@ -1722,7 +1722,7 @@ bool timekeeping_rtc_skipresume(void)
        return !suspend_timing_needed;
 }
 
-/**
+/*
  * 1) can be determined whether to use or not only when doing
  * timekeeping_resume() which is invoked after rtc_suspend(),
  * so we can't skip rtc_suspend() surely if system has 1).
index 3bbd3f0c810c895a1c34d1f90af268e3c90b453a..f47274de012b546a65e5b3770e5da72784faebd3 100644 (file)
@@ -848,6 +848,9 @@ static int bpf_send_signal_common(u32 sig, enum pid_type type)
                return -EPERM;
        if (unlikely(!nmi_uaccess_okay()))
                return -EPERM;
+       /* Task should not be pid=1 to avoid kernel panic. */
+       if (unlikely(is_global_init(current)))
+               return -EPERM;
 
        if (irqs_disabled()) {
                /* Do an early check on signal validity. Otherwise,
index f5f51166d8c20f2eb0986e4a715b5a16ed0427e5..cc32743c1171f8eb3e18129d77283733995ae05d 100644 (file)
@@ -23,8 +23,10 @@ static struct string_stream_fragment *alloc_string_stream_fragment(
                return ERR_PTR(-ENOMEM);
 
        frag->fragment = kunit_kmalloc(test, len, gfp);
-       if (!frag->fragment)
+       if (!frag->fragment) {
+               kunit_kfree(test, frag);
                return ERR_PTR(-ENOMEM);
+       }
 
        return frag;
 }
index 45e93ece8ba0d243971908a08afbaf04e374efdf..2afe4c5d8919102b99f2931dd51ed457a9aa89ca 100644 (file)
@@ -23,7 +23,6 @@
                }                                                               \
                if (!--retry)                                                   \
                        break;                                                  \
-               cpu_relax();                                                    \
        }                                                                       \
 } while (0)
 
index a0ad2a7959b5d24d7e892fc69aabed60686ec782..8d7519a8f308d94b5d4ac92e4a61fcbc6e6dd670 100644 (file)
@@ -470,22 +470,27 @@ int sg_alloc_append_table_from_pages(struct sg_append_table *sgt_append,
                return -EOPNOTSUPP;
 
        if (sgt_append->prv) {
+               unsigned long next_pfn = (page_to_phys(sg_page(sgt_append->prv)) +
+                       sgt_append->prv->offset + sgt_append->prv->length) / PAGE_SIZE;
+
                if (WARN_ON(offset))
                        return -EINVAL;
 
                /* Merge contiguous pages into the last SG */
                prv_len = sgt_append->prv->length;
-               last_pg = sg_page(sgt_append->prv);
-               while (n_pages && pages_are_mergeable(last_pg, pages[0])) {
-                       if (sgt_append->prv->length + PAGE_SIZE > max_segment)
-                               break;
-                       sgt_append->prv->length += PAGE_SIZE;
-                       last_pg = pages[0];
-                       pages++;
-                       n_pages--;
+               if (page_to_pfn(pages[0]) == next_pfn) {
+                       last_pg = pfn_to_page(next_pfn - 1);
+                       while (n_pages && pages_are_mergeable(pages[0], last_pg)) {
+                               if (sgt_append->prv->length + PAGE_SIZE > max_segment)
+                                       break;
+                               sgt_append->prv->length += PAGE_SIZE;
+                               last_pg = pages[0];
+                               pages++;
+                               n_pages--;
+                       }
+                       if (!n_pages)
+                               goto out;
                }
-               if (!n_pages)
-                       goto out;
        }
 
        /* compute number of contiguous chunks */
index 6bdc1cd15f7614e5f4eb2dba7618ba8a16be2249..ec10506834b6291ad189381c7cbf544ebe5fc4ec 100644 (file)
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0
-/**
+/*
  * lib/minmax.c: windowed min/max tracker
  *
  * Kathleen Nichols' algorithm for tracking the minimum (or maximum)
index db895230ee7e4e46304fc005ebf4b78864d6db77..7fcdb98c9e68930260d063ab581490cacc01152d 100644 (file)
@@ -94,6 +94,8 @@ static int hugetlb_acct_memory(struct hstate *h, long delta);
 static void hugetlb_vma_lock_free(struct vm_area_struct *vma);
 static void hugetlb_vma_lock_alloc(struct vm_area_struct *vma);
 static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma);
+static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
+               unsigned long start, unsigned long end);
 
 static inline bool subpool_is_free(struct hugepage_subpool *spool)
 {
@@ -1181,7 +1183,7 @@ void hugetlb_dup_vma_private(struct vm_area_struct *vma)
 
 /*
  * Reset and decrement one ref on hugepage private reservation.
- * Called with mm->mmap_sem writer semaphore held.
+ * Called with mm->mmap_lock writer semaphore held.
  * This function should be only used by move_vma() and operate on
  * same sized vma. It should never come here with last ref on the
  * reservation.
@@ -4834,6 +4836,25 @@ static int hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr)
 {
        if (addr & ~(huge_page_mask(hstate_vma(vma))))
                return -EINVAL;
+
+       /*
+        * PMD sharing is only possible for PUD_SIZE-aligned address ranges
+        * in HugeTLB VMAs. If we will lose PUD_SIZE alignment due to this
+        * split, unshare PMDs in the PUD_SIZE interval surrounding addr now.
+        */
+       if (addr & ~PUD_MASK) {
+               /*
+                * hugetlb_vm_op_split is called right before we attempt to
+                * split the VMA. We will need to unshare PMDs in the old and
+                * new VMAs, so let's unshare before we split.
+                */
+               unsigned long floor = addr & PUD_MASK;
+               unsigned long ceil = floor + PUD_SIZE;
+
+               if (floor >= vma->vm_start && ceil <= vma->vm_end)
+                       hugetlb_unshare_pmds(vma, floor, ceil);
+       }
+
        return 0;
 }
 
@@ -5131,7 +5152,7 @@ static void move_huge_pte(struct vm_area_struct *vma, unsigned long old_addr,
 
        /*
         * We don't have to worry about the ordering of src and dst ptlocks
-        * because exclusive mmap_sem (or the i_mmap_lock) prevents deadlock.
+        * because exclusive mmap_lock (or the i_mmap_lock) prevents deadlock.
         */
        if (src_ptl != dst_ptl)
                spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
@@ -6639,8 +6660,17 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
                spinlock_t *ptl;
                ptep = huge_pte_offset(mm, address, psize);
                if (!ptep) {
-                       address |= last_addr_mask;
-                       continue;
+                       if (!uffd_wp) {
+                               address |= last_addr_mask;
+                               continue;
+                       }
+                       /*
+                        * Userfaultfd wr-protect requires pgtable
+                        * pre-allocations to install pte markers.
+                        */
+                       ptep = huge_pte_alloc(mm, vma, address, psize);
+                       if (!ptep)
+                               break;
                }
                ptl = huge_pte_lock(h, mm, ptep);
                if (huge_pmd_unshare(mm, vma, address, ptep)) {
@@ -6658,16 +6688,13 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
                }
                pte = huge_ptep_get(ptep);
                if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
-                       spin_unlock(ptl);
-                       continue;
-               }
-               if (unlikely(is_hugetlb_entry_migration(pte))) {
+                       /* Nothing to do. */
+               } else if (unlikely(is_hugetlb_entry_migration(pte))) {
                        swp_entry_t entry = pte_to_swp_entry(pte);
                        struct page *page = pfn_swap_entry_to_page(entry);
+                       pte_t newpte = pte;
 
-                       if (!is_readable_migration_entry(entry)) {
-                               pte_t newpte;
-
+                       if (is_writable_migration_entry(entry)) {
                                if (PageAnon(page))
                                        entry = make_readable_exclusive_migration_entry(
                                                                swp_offset(entry));
@@ -6675,25 +6702,22 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
                                        entry = make_readable_migration_entry(
                                                                swp_offset(entry));
                                newpte = swp_entry_to_pte(entry);
-                               if (uffd_wp)
-                                       newpte = pte_swp_mkuffd_wp(newpte);
-                               else if (uffd_wp_resolve)
-                                       newpte = pte_swp_clear_uffd_wp(newpte);
-                               set_huge_pte_at(mm, address, ptep, newpte);
                                pages++;
                        }
-                       spin_unlock(ptl);
-                       continue;
-               }
-               if (unlikely(pte_marker_uffd_wp(pte))) {
-                       /*
-                        * This is changing a non-present pte into a none pte,
-                        * no need for huge_ptep_modify_prot_start/commit().
-                        */
+
+                       if (uffd_wp)
+                               newpte = pte_swp_mkuffd_wp(newpte);
+                       else if (uffd_wp_resolve)
+                               newpte = pte_swp_clear_uffd_wp(newpte);
+                       if (!pte_same(pte, newpte))
+                               set_huge_pte_at(mm, address, ptep, newpte);
+               } else if (unlikely(is_pte_marker(pte))) {
+                       /* No other markers apply for now. */
+                       WARN_ON_ONCE(!pte_marker_uffd_wp(pte));
                        if (uffd_wp_resolve)
+                               /* Safe to modify directly (non-present->none). */
                                huge_pte_clear(mm, address, ptep, psize);
-               }
-               if (!huge_pte_none(pte)) {
+               } else if (!huge_pte_none(pte)) {
                        pte_t old_pte;
                        unsigned int shift = huge_page_shift(hstate_vma(vma));
 
@@ -7328,26 +7352,21 @@ void move_hugetlb_state(struct folio *old_folio, struct folio *new_folio, int re
        }
 }
 
-/*
- * This function will unconditionally remove all the shared pmd pgtable entries
- * within the specific vma for a hugetlbfs memory range.
- */
-void hugetlb_unshare_all_pmds(struct vm_area_struct *vma)
+static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
+                                  unsigned long start,
+                                  unsigned long end)
 {
        struct hstate *h = hstate_vma(vma);
        unsigned long sz = huge_page_size(h);
        struct mm_struct *mm = vma->vm_mm;
        struct mmu_notifier_range range;
-       unsigned long address, start, end;
+       unsigned long address;
        spinlock_t *ptl;
        pte_t *ptep;
 
        if (!(vma->vm_flags & VM_MAYSHARE))
                return;
 
-       start = ALIGN(vma->vm_start, PUD_SIZE);
-       end = ALIGN_DOWN(vma->vm_end, PUD_SIZE);
-
        if (start >= end)
                return;
 
@@ -7379,6 +7398,16 @@ void hugetlb_unshare_all_pmds(struct vm_area_struct *vma)
        mmu_notifier_invalidate_range_end(&range);
 }
 
+/*
+ * This function will unconditionally remove all the shared pmd pgtable entries
+ * within the specific vma for a hugetlbfs memory range.
+ */
+void hugetlb_unshare_all_pmds(struct vm_area_struct *vma)
+{
+       hugetlb_unshare_pmds(vma, ALIGN(vma->vm_start, PUD_SIZE),
+                       ALIGN_DOWN(vma->vm_end, PUD_SIZE));
+}
+
 #ifdef CONFIG_CMA
 static bool cma_reserve_called __initdata;
 
index 1d02757e90a3266a0d2f66872c68ba117115b33b..22598b20c7b758406f2f8e34d809a7cc621729ab 100644 (file)
@@ -119,7 +119,7 @@ EXPORT_SYMBOL_GPL(kasan_restore_multi_shot);
  * Whether the KASAN KUnit test suite is currently being executed.
  * Updated in kasan_test.c.
  */
-bool kasan_kunit_executing;
+static bool kasan_kunit_executing;
 
 void kasan_kunit_test_suite_start(void)
 {
index 5cb401aa2b9d8b424462f4bded8511491d1c7045..79be1313332236e54155fa15ae6705c0845f642b 100644 (file)
@@ -1460,14 +1460,6 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
        if (!hugepage_vma_check(vma, vma->vm_flags, false, false, false))
                return SCAN_VMA_CHECK;
 
-       /*
-        * Symmetry with retract_page_tables(): Exclude MAP_PRIVATE mappings
-        * that got written to. Without this, we'd have to also lock the
-        * anon_vma if one exists.
-        */
-       if (vma->anon_vma)
-               return SCAN_VMA_CHECK;
-
        /* Keep pmd pgtable for uffd-wp; see comment in retract_page_tables() */
        if (userfaultfd_wp(vma))
                return SCAN_PTE_UFFD_WP;
@@ -1567,8 +1559,14 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
        }
 
        /* step 4: remove pte entries */
+       /* we make no change to anon, but protect concurrent anon page lookup */
+       if (vma->anon_vma)
+               anon_vma_lock_write(vma->anon_vma);
+
        collapse_and_free_pmd(mm, vma, haddr, pmd);
 
+       if (vma->anon_vma)
+               anon_vma_unlock_write(vma->anon_vma);
        i_mmap_unlock_write(vma->vm_file->f_mapping);
 
 maybe_install_pmd:
@@ -2649,7 +2647,7 @@ int madvise_collapse(struct vm_area_struct *vma, struct vm_area_struct **prev,
                                goto out_nolock;
                        }
 
-                       hend = vma->vm_end & HPAGE_PMD_MASK;
+                       hend = min(hend, vma->vm_end & HPAGE_PMD_MASK);
                }
                mmap_assert_locked(mm);
                memset(cc->node_load, 0, sizeof(cc->node_load));
index a56a6d17e201e3cecc3f2b8d66361db2ee5e91b0..b6ea204d4e23bdf862db9d0eb804c09c6286d45e 100644 (file)
@@ -130,7 +130,7 @@ static int replace_anon_vma_name(struct vm_area_struct *vma,
 #endif /* CONFIG_ANON_VMA_NAME */
 /*
  * Update the vm_flags on region of a vma, splitting it or merging it as
- * necessary.  Must be called with mmap_sem held for writing;
+ * necessary.  Must be called with mmap_lock held for writing;
  * Caller should ensure anon_name stability by raising its refcount even when
  * anon_name belongs to a valid vma because this function might free that vma.
  */
index 511d4783dcf1d86374c14d528cfec69c51247749..685e30e6d27c5b698b8842a5609ca88bc34a370c 100644 (file)
@@ -836,7 +836,7 @@ void __init_memblock memblock_free(void *ptr, size_t size)
  * @base: phys starting address of the  boot memory block
  * @size: size of the boot memory block in bytes
  *
- * Free boot memory block previously allocated by memblock_alloc_xx() API.
+ * Free boot memory block previously allocated by memblock_phys_alloc_xx() API.
  * The freeing memory will not be released to the buddy allocator.
  */
 int __init_memblock memblock_phys_free(phys_addr_t base, phys_addr_t size)
@@ -1640,7 +1640,13 @@ void __init memblock_free_late(phys_addr_t base, phys_addr_t size)
        end = PFN_DOWN(base + size);
 
        for (; cursor < end; cursor++) {
-               memblock_free_pages(pfn_to_page(cursor), cursor, 0);
+               /*
+                * Reserved pages are always initialized by the end of
+                * memblock_free_all() (by memmap_init() and, if deferred
+                * initialization is enabled, memmap_init_reserved_pages()), so
+                * these pages can be released directly to the buddy allocator.
+                */
+               __free_pages_core(pfn_to_page(cursor), 0);
                totalram_pages_inc();
        }
 }
index 87d929316d57264197b01cc815209ca3a5d039b7..425a9349e610825f69745e5aa446e997e741f1d9 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1524,6 +1524,10 @@ int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot)
        if (vma_soft_dirty_enabled(vma) && !is_vm_hugetlb_page(vma))
                return 1;
 
+       /* Do we need write faults for uffd-wp tracking? */
+       if (userfaultfd_wp(vma))
+               return 1;
+
        /* Specialty mapping? */
        if (vm_flags & VM_PFNMAP)
                return 0;
@@ -2290,7 +2294,7 @@ static inline int munmap_sidetree(struct vm_area_struct *vma,
  * @start: The aligned start address to munmap.
  * @end: The aligned end address to munmap.
  * @uf: The userfaultfd list_head
- * @downgrade: Set to true to attempt a write downgrade of the mmap_sem
+ * @downgrade: Set to true to attempt a write downgrade of the mmap_lock
  *
  * If @downgrade is true, check return code for potential release of the lock.
  */
@@ -2465,7 +2469,7 @@ map_count_exceeded:
  * @len: The length of the range to munmap
  * @uf: The userfaultfd list_head
  * @downgrade: set to true if the user wants to attempt to write_downgrade the
- * mmap_sem
+ * mmap_lock
  *
  * This function takes a @mas that is either pointing to the previous VMA or set
  * to MA_START and sets it up to remove the mapping(s).  The @len will be
index 214c70e1d05942e8a0a9a2e4ece7a6f5086f9c62..5b83938ecb67cf4533c4a30e997cea3e3111b285 100644 (file)
@@ -559,7 +559,6 @@ void vma_mas_remove(struct vm_area_struct *vma, struct ma_state *mas)
 
 static void setup_vma_to_mm(struct vm_area_struct *vma, struct mm_struct *mm)
 {
-       mm->map_count++;
        vma->vm_mm = mm;
 
        /* add the VMA to the mapping */
@@ -587,6 +586,7 @@ static void mas_add_vma_to_mm(struct ma_state *mas, struct mm_struct *mm,
        BUG_ON(!vma->vm_region);
 
        setup_vma_to_mm(vma, mm);
+       mm->map_count++;
 
        /* add the VMA to the tree */
        vma_mas_store(vma, mas);
@@ -1240,6 +1240,7 @@ share:
 error_just_free:
        up_write(&nommu_region_sem);
 error:
+       mas_destroy(&mas);
        if (region->vm_file)
                fput(region->vm_file);
        kmem_cache_free(vm_region_jar, region);
@@ -1250,7 +1251,6 @@ error:
 
 sharing_violation:
        up_write(&nommu_region_sem);
-       mas_destroy(&mas);
        pr_warn("Attempt to share mismatched mappings\n");
        ret = -EINVAL;
        goto error;
@@ -1347,6 +1347,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
        if (vma->vm_file)
                return -ENOMEM;
 
+       mm = vma->vm_mm;
        if (mm->map_count >= sysctl_max_map_count)
                return -ENOMEM;
 
@@ -1398,6 +1399,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
        mas_set_range(&mas, vma->vm_start, vma->vm_end - 1);
        mas_store(&mas, vma);
        vma_mas_store(new, &mas);
+       mm->map_count++;
        return 0;
 
 err_mas_preallocate:
@@ -1509,7 +1511,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len, struct list
 erase_whole_vma:
        if (delete_vma_from_mm(vma))
                ret = -ENOMEM;
-       delete_vma(mm, vma);
+       else
+               delete_vma(mm, vma);
        return ret;
 }
 
index c301487be5fb405d2f2d12a9c497e951dbecdbe3..0005ab2c29af7bc3241bd50514036c2cdbf7a82b 100644 (file)
@@ -478,12 +478,10 @@ bool shmem_is_huge(struct vm_area_struct *vma, struct inode *inode,
        if (vma && ((vma->vm_flags & VM_NOHUGEPAGE) ||
            test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags)))
                return false;
-       if (shmem_huge_force)
-               return true;
-       if (shmem_huge == SHMEM_HUGE_FORCE)
-               return true;
        if (shmem_huge == SHMEM_HUGE_DENY)
                return false;
+       if (shmem_huge_force || shmem_huge == SHMEM_HUGE_FORCE)
+               return true;
 
        switch (SHMEM_SB(inode->i_sb)->huge) {
        case SHMEM_HUGE_ALWAYS:
index 7a269db050eed88b5273d2c3820ff8e453de518b..29300fc1289a8375188853a07edb64d69df634e0 100644 (file)
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2211,6 +2211,8 @@ static int drain_freelist(struct kmem_cache *cache,
                raw_spin_unlock_irq(&n->list_lock);
                slab_destroy(cache, slab);
                nr_freed++;
+
+               cond_resched();
        }
 out:
        return nr_freed;
index 9630b1275557978dd45a5e4a0322192bbc30af42..82c7005ede65679bd528637ff029573a03e7776e 100644 (file)
@@ -305,13 +305,12 @@ static void xen_9pfs_front_free(struct xen_9pfs_front_priv *priv)
        kfree(priv);
 }
 
-static int xen_9pfs_front_remove(struct xenbus_device *dev)
+static void xen_9pfs_front_remove(struct xenbus_device *dev)
 {
        struct xen_9pfs_front_priv *priv = dev_get_drvdata(&dev->dev);
 
        dev_set_drvdata(&dev->dev, NULL);
        xen_9pfs_front_free(priv);
-       return 0;
 }
 
 static int xen_9pfs_front_alloc_dataring(struct xenbus_device *dev,
index d3e542c2fc3e4939cfa4922b5e1722e11be9e4b0..acf563fbdfd951656216cf3d79dd3cae4e3e42f8 100644 (file)
@@ -821,6 +821,7 @@ static void terminate_big_destroy(struct hci_dev *hdev, void *data, int err)
 static int hci_le_terminate_big(struct hci_dev *hdev, u8 big, u8 bis)
 {
        struct iso_list_data *d;
+       int ret;
 
        bt_dev_dbg(hdev, "big 0x%2.2x bis 0x%2.2x", big, bis);
 
@@ -831,8 +832,12 @@ static int hci_le_terminate_big(struct hci_dev *hdev, u8 big, u8 bis)
        d->big = big;
        d->bis = bis;
 
-       return hci_cmd_sync_queue(hdev, terminate_big_sync, d,
-                                 terminate_big_destroy);
+       ret = hci_cmd_sync_queue(hdev, terminate_big_sync, d,
+                                terminate_big_destroy);
+       if (ret)
+               kfree(d);
+
+       return ret;
 }
 
 static int big_terminate_sync(struct hci_dev *hdev, void *data)
@@ -857,6 +862,7 @@ static int big_terminate_sync(struct hci_dev *hdev, void *data)
 static int hci_le_big_terminate(struct hci_dev *hdev, u8 big, u16 sync_handle)
 {
        struct iso_list_data *d;
+       int ret;
 
        bt_dev_dbg(hdev, "big 0x%2.2x sync_handle 0x%4.4x", big, sync_handle);
 
@@ -867,8 +873,12 @@ static int hci_le_big_terminate(struct hci_dev *hdev, u8 big, u16 sync_handle)
        d->big = big;
        d->sync_handle = sync_handle;
 
-       return hci_cmd_sync_queue(hdev, big_terminate_sync, d,
-                                 terminate_big_destroy);
+       ret = hci_cmd_sync_queue(hdev, big_terminate_sync, d,
+                                terminate_big_destroy);
+       if (ret)
+               kfree(d);
+
+       return ret;
 }
 
 /* Cleanup BIS connection
index 0594af4e37cad9733aa52a2035b22d549d408fd3..ad92a4be5851739cba345c629690b9274e154db5 100644 (file)
@@ -3848,8 +3848,11 @@ static u8 hci_cc_le_set_cig_params(struct hci_dev *hdev, void *data,
                           conn->handle, conn->link);
 
                /* Create CIS if LE is already connected */
-               if (conn->link && conn->link->state == BT_CONNECTED)
+               if (conn->link && conn->link->state == BT_CONNECTED) {
+                       rcu_read_unlock();
                        hci_le_create_cis(conn->link);
+                       rcu_read_lock();
+               }
 
                if (i == rp->num_handles)
                        break;
index 9e2d7e4b850c545cc4d30118247cc47e476ff332..117eedb6f70993a5b58987a7a7bf794aaf8e000e 100644 (file)
@@ -3572,7 +3572,7 @@ static const struct hci_init_stage hci_init2[] = {
 static int hci_le_read_buffer_size_sync(struct hci_dev *hdev)
 {
        /* Use Read LE Buffer Size V2 if supported */
-       if (hdev->commands[41] & 0x20)
+       if (iso_capable(hdev) && hdev->commands[41] & 0x20)
                return __hci_cmd_sync_status(hdev,
                                             HCI_OP_LE_READ_BUFFER_SIZE_V2,
                                             0, NULL, HCI_CMD_TIMEOUT);
@@ -3597,10 +3597,10 @@ static int hci_le_read_supported_states_sync(struct hci_dev *hdev)
 
 /* LE Controller init stage 2 command sequence */
 static const struct hci_init_stage le_init2[] = {
-       /* HCI_OP_LE_READ_BUFFER_SIZE */
-       HCI_INIT(hci_le_read_buffer_size_sync),
        /* HCI_OP_LE_READ_LOCAL_FEATURES */
        HCI_INIT(hci_le_read_local_features_sync),
+       /* HCI_OP_LE_READ_BUFFER_SIZE */
+       HCI_INIT(hci_le_read_buffer_size_sync),
        /* HCI_OP_LE_READ_SUPPORTED_STATES */
        HCI_INIT(hci_le_read_supported_states_sync),
        {}
@@ -6187,20 +6187,13 @@ int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
 
 static int _update_adv_data_sync(struct hci_dev *hdev, void *data)
 {
-       u8 instance = *(u8 *)data;
-
-       kfree(data);
+       u8 instance = PTR_ERR(data);
 
        return hci_update_adv_data_sync(hdev, instance);
 }
 
 int hci_update_adv_data(struct hci_dev *hdev, u8 instance)
 {
-       u8 *inst_ptr = kmalloc(1, GFP_KERNEL);
-
-       if (!inst_ptr)
-               return -ENOMEM;
-
-       *inst_ptr = instance;
-       return hci_cmd_sync_queue(hdev, _update_adv_data_sync, inst_ptr, NULL);
+       return hci_cmd_sync_queue(hdev, _update_adv_data_sync,
+                                 ERR_PTR(instance), NULL);
 }
index 035bb5d25f850a7d427f87b2d5bc960526b3fe0f..24444b502e5865400d4eecbdcf68ace4b9e687b6 100644 (file)
@@ -289,15 +289,15 @@ static int iso_connect_bis(struct sock *sk)
        hci_dev_unlock(hdev);
        hci_dev_put(hdev);
 
+       err = iso_chan_add(conn, sk, NULL);
+       if (err)
+               return err;
+
        lock_sock(sk);
 
        /* Update source addr of the socket */
        bacpy(&iso_pi(sk)->src, &hcon->src);
 
-       err = iso_chan_add(conn, sk, NULL);
-       if (err)
-               goto release;
-
        if (hcon->state == BT_CONNECTED) {
                iso_sock_clear_timer(sk);
                sk->sk_state = BT_CONNECTED;
@@ -306,7 +306,6 @@ static int iso_connect_bis(struct sock *sk)
                iso_sock_set_timer(sk, sk->sk_sndtimeo);
        }
 
-release:
        release_sock(sk);
        return err;
 
@@ -372,15 +371,15 @@ static int iso_connect_cis(struct sock *sk)
        hci_dev_unlock(hdev);
        hci_dev_put(hdev);
 
+       err = iso_chan_add(conn, sk, NULL);
+       if (err)
+               return err;
+
        lock_sock(sk);
 
        /* Update source addr of the socket */
        bacpy(&iso_pi(sk)->src, &hcon->src);
 
-       err = iso_chan_add(conn, sk, NULL);
-       if (err)
-               goto release;
-
        if (hcon->state == BT_CONNECTED) {
                iso_sock_clear_timer(sk);
                sk->sk_state = BT_CONNECTED;
@@ -392,7 +391,6 @@ static int iso_connect_cis(struct sock *sk)
                iso_sock_set_timer(sk, sk->sk_sndtimeo);
        }
 
-release:
        release_sock(sk);
        return err;
 
@@ -895,13 +893,10 @@ static int iso_listen_bis(struct sock *sk)
        if (!hdev)
                return -EHOSTUNREACH;
 
-       hci_dev_lock(hdev);
-
        err = hci_pa_create_sync(hdev, &iso_pi(sk)->dst,
                                 le_addr_type(iso_pi(sk)->dst_type),
                                 iso_pi(sk)->bc_sid);
 
-       hci_dev_unlock(hdev);
        hci_dev_put(hdev);
 
        return err;
@@ -1432,33 +1427,29 @@ static void iso_conn_ready(struct iso_conn *conn)
        struct sock *parent;
        struct sock *sk = conn->sk;
        struct hci_ev_le_big_sync_estabilished *ev;
+       struct hci_conn *hcon;
 
        BT_DBG("conn %p", conn);
 
        if (sk) {
                iso_sock_ready(conn->sk);
        } else {
-               iso_conn_lock(conn);
-
-               if (!conn->hcon) {
-                       iso_conn_unlock(conn);
+               hcon = conn->hcon;
+               if (!hcon)
                        return;
-               }
 
-               ev = hci_recv_event_data(conn->hcon->hdev,
+               ev = hci_recv_event_data(hcon->hdev,
                                         HCI_EVT_LE_BIG_SYNC_ESTABILISHED);
                if (ev)
-                       parent = iso_get_sock_listen(&conn->hcon->src,
-                                                    &conn->hcon->dst,
+                       parent = iso_get_sock_listen(&hcon->src,
+                                                    &hcon->dst,
                                                     iso_match_big, ev);
                else
-                       parent = iso_get_sock_listen(&conn->hcon->src,
+                       parent = iso_get_sock_listen(&hcon->src,
                                                     BDADDR_ANY, NULL, NULL);
 
-               if (!parent) {
-                       iso_conn_unlock(conn);
+               if (!parent)
                        return;
-               }
 
                lock_sock(parent);
 
@@ -1466,30 +1457,29 @@ static void iso_conn_ready(struct iso_conn *conn)
                                    BTPROTO_ISO, GFP_ATOMIC, 0);
                if (!sk) {
                        release_sock(parent);
-                       iso_conn_unlock(conn);
                        return;
                }
 
                iso_sock_init(sk, parent);
 
-               bacpy(&iso_pi(sk)->src, &conn->hcon->src);
-               iso_pi(sk)->src_type = conn->hcon->src_type;
+               bacpy(&iso_pi(sk)->src, &hcon->src);
+               iso_pi(sk)->src_type = hcon->src_type;
 
                /* If hcon has no destination address (BDADDR_ANY) it means it
                 * was created by HCI_EV_LE_BIG_SYNC_ESTABILISHED so we need to
                 * initialize using the parent socket destination address.
                 */
-               if (!bacmp(&conn->hcon->dst, BDADDR_ANY)) {
-                       bacpy(&conn->hcon->dst, &iso_pi(parent)->dst);
-                       conn->hcon->dst_type = iso_pi(parent)->dst_type;
-                       conn->hcon->sync_handle = iso_pi(parent)->sync_handle;
+               if (!bacmp(&hcon->dst, BDADDR_ANY)) {
+                       bacpy(&hcon->dst, &iso_pi(parent)->dst);
+                       hcon->dst_type = iso_pi(parent)->dst_type;
+                       hcon->sync_handle = iso_pi(parent)->sync_handle;
                }
 
-               bacpy(&iso_pi(sk)->dst, &conn->hcon->dst);
-               iso_pi(sk)->dst_type = conn->hcon->dst_type;
+               bacpy(&iso_pi(sk)->dst, &hcon->dst);
+               iso_pi(sk)->dst_type = hcon->dst_type;
 
-               hci_conn_hold(conn->hcon);
-               __iso_chan_add(conn, sk, parent);
+               hci_conn_hold(hcon);
+               iso_chan_add(conn, sk, parent);
 
                if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags))
                        sk->sk_state = BT_CONNECT2;
@@ -1500,8 +1490,6 @@ static void iso_conn_ready(struct iso_conn *conn)
                parent->sk_data_ready(parent);
 
                release_sock(parent);
-
-               iso_conn_unlock(conn);
        }
 }
 
index 6a8b7e84293dfd436af59a433f4d82e92497867e..bdf978605d5a85bcbee1c0db1a76065e31a6096e 100644 (file)
@@ -27,7 +27,7 @@ struct mgmt_mesh_tx {
        struct sock *sk;
        u8 handle;
        u8 instance;
-       u8 param[sizeof(struct mgmt_cp_mesh_send) + 29];
+       u8 param[sizeof(struct mgmt_cp_mesh_send) + 31];
 };
 
 struct mgmt_pending_cmd {
index 21e24da4847f05a96145815e76e19c3e543a46f0..4397e14ff560fd1ca954bc2fdc08639f6988019c 100644 (file)
@@ -391,6 +391,7 @@ static int rfcomm_sock_connect(struct socket *sock, struct sockaddr *addr, int a
            addr->sa_family != AF_BLUETOOTH)
                return -EINVAL;
 
+       sock_hold(sk);
        lock_sock(sk);
 
        if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND) {
@@ -410,14 +411,18 @@ static int rfcomm_sock_connect(struct socket *sock, struct sockaddr *addr, int a
        d->sec_level = rfcomm_pi(sk)->sec_level;
        d->role_switch = rfcomm_pi(sk)->role_switch;
 
+       /* Drop sock lock to avoid potential deadlock with the RFCOMM lock */
+       release_sock(sk);
        err = rfcomm_dlc_open(d, &rfcomm_pi(sk)->src, &sa->rc_bdaddr,
                              sa->rc_channel);
-       if (!err)
+       lock_sock(sk);
+       if (!err && !sock_flag(sk, SOCK_ZAPPED))
                err = bt_sock_wait_state(sk, BT_CONNECTED,
                                sock_sndtimeo(sk, flags & O_NONBLOCK));
 
 done:
        release_sock(sk);
+       sock_put(sk);
        return err;
 }
 
index cc405d8c7c303f301303a6c643cb22b384713461..8480684f276251bd38f1d930ee71e1019c6c537c 100644 (file)
@@ -269,11 +269,15 @@ int cfctrl_linkup_request(struct cflayer *layer,
        default:
                pr_warn("Request setup of bad link type = %d\n",
                        param->linktype);
+               cfpkt_destroy(pkt);
                return -EINVAL;
        }
        req = kzalloc(sizeof(*req), GFP_KERNEL);
-       if (!req)
+       if (!req) {
+               cfpkt_destroy(pkt);
                return -ENOMEM;
+       }
+
        req->client_layer = user_layer;
        req->cmd = CFCTRL_CMD_LINK_SETUP;
        req->param = *param;
index 929358677183d5827b7cf2c54700cc34eb36544d..43cc1fe58a2c6e41d05db12ec6e992b8bf62a15e 100644 (file)
@@ -3180,15 +3180,18 @@ static int bpf_skb_generic_push(struct sk_buff *skb, u32 off, u32 len)
 
 static int bpf_skb_generic_pop(struct sk_buff *skb, u32 off, u32 len)
 {
+       void *old_data;
+
        /* skb_ensure_writable() is not needed here, as we're
         * already working on an uncloned skb.
         */
        if (unlikely(!pskb_may_pull(skb, off + len)))
                return -ENOMEM;
 
-       skb_postpull_rcsum(skb, skb->data + off, len);
-       memmove(skb->data + len, skb->data, off);
+       old_data = skb->data;
        __skb_pull(skb, len);
+       skb_postpull_rcsum(skb, old_data + off, len);
+       memmove(skb->data, old_data, off);
 
        return 0;
 }
index fd8c6a7e8d3e2e6b439109d0089f44a547c7347e..506f83d715f873c9bc3727e28ace71e00fa79d2f 100644 (file)
@@ -505,8 +505,9 @@ found_ptype:
        NAPI_GRO_CB(skb)->count = 1;
        if (unlikely(skb_is_gso(skb))) {
                NAPI_GRO_CB(skb)->count = skb_shinfo(skb)->gso_segs;
-               /* Only support TCP at the moment. */
-               if (!skb_is_gso_tcp(skb))
+               /* Only support TCP and non DODGY users. */
+               if (!skb_is_gso_tcp(skb) ||
+                   (skb_shinfo(skb)->gso_type & SKB_GSO_DODGY))
                        NAPI_GRO_CB(skb)->flush = 1;
        }
 
index c2f1a542e6fa9ccd94ba9c63f707c78439405921..646b3e490c71a748f9612081971f36e7e449fc1a 100644 (file)
@@ -2078,58 +2078,91 @@ static int ethtool_get_stats(struct net_device *dev, void __user *useraddr)
        return ret;
 }
 
-static int ethtool_get_phy_stats(struct net_device *dev, void __user *useraddr)
+static int ethtool_vzalloc_stats_array(int n_stats, u64 **data)
 {
+       if (n_stats < 0)
+               return n_stats;
+       if (n_stats > S32_MAX / sizeof(u64))
+               return -ENOMEM;
+       if (WARN_ON_ONCE(!n_stats))
+               return -EOPNOTSUPP;
+
+       *data = vzalloc(array_size(n_stats, sizeof(u64)));
+       if (!*data)
+               return -ENOMEM;
+
+       return 0;
+}
+
+static int ethtool_get_phy_stats_phydev(struct phy_device *phydev,
+                                        struct ethtool_stats *stats,
+                                        u64 **data)
+ {
        const struct ethtool_phy_ops *phy_ops = ethtool_phy_ops;
+       int n_stats, ret;
+
+       if (!phy_ops || !phy_ops->get_sset_count || !phy_ops->get_stats)
+               return -EOPNOTSUPP;
+
+       n_stats = phy_ops->get_sset_count(phydev);
+
+       ret = ethtool_vzalloc_stats_array(n_stats, data);
+       if (ret)
+               return ret;
+
+       stats->n_stats = n_stats;
+       return phy_ops->get_stats(phydev, stats, *data);
+}
+
+static int ethtool_get_phy_stats_ethtool(struct net_device *dev,
+                                         struct ethtool_stats *stats,
+                                         u64 **data)
+{
        const struct ethtool_ops *ops = dev->ethtool_ops;
-       struct phy_device *phydev = dev->phydev;
-       struct ethtool_stats stats;
-       u64 *data;
-       int ret, n_stats;
+       int n_stats, ret;
 
-       if (!phydev && (!ops->get_ethtool_phy_stats || !ops->get_sset_count))
+       if (!ops || !ops->get_sset_count || ops->get_ethtool_phy_stats)
                return -EOPNOTSUPP;
 
-       if (phydev && !ops->get_ethtool_phy_stats &&
-           phy_ops && phy_ops->get_sset_count)
-               n_stats = phy_ops->get_sset_count(phydev);
-       else
-               n_stats = ops->get_sset_count(dev, ETH_SS_PHY_STATS);
-       if (n_stats < 0)
-               return n_stats;
-       if (n_stats > S32_MAX / sizeof(u64))
-               return -ENOMEM;
-       WARN_ON_ONCE(!n_stats);
+       n_stats = ops->get_sset_count(dev, ETH_SS_PHY_STATS);
+
+       ret = ethtool_vzalloc_stats_array(n_stats, data);
+       if (ret)
+               return ret;
+
+       stats->n_stats = n_stats;
+       ops->get_ethtool_phy_stats(dev, stats, *data);
+
+       return 0;
+}
+
+static int ethtool_get_phy_stats(struct net_device *dev, void __user *useraddr)
+{
+       struct phy_device *phydev = dev->phydev;
+       struct ethtool_stats stats;
+       u64 *data = NULL;
+       int ret = -EOPNOTSUPP;
 
        if (copy_from_user(&stats, useraddr, sizeof(stats)))
                return -EFAULT;
 
-       stats.n_stats = n_stats;
+       if (phydev)
+               ret = ethtool_get_phy_stats_phydev(phydev, &stats, &data);
 
-       if (n_stats) {
-               data = vzalloc(array_size(n_stats, sizeof(u64)));
-               if (!data)
-                       return -ENOMEM;
+       if (ret == -EOPNOTSUPP)
+               ret = ethtool_get_phy_stats_ethtool(dev, &stats, &data);
 
-               if (phydev && !ops->get_ethtool_phy_stats &&
-                   phy_ops && phy_ops->get_stats) {
-                       ret = phy_ops->get_stats(phydev, &stats, data);
-                       if (ret < 0)
-                               goto out;
-               } else {
-                       ops->get_ethtool_phy_stats(dev, &stats, data);
-               }
-       } else {
-               data = NULL;
-       }
+       if (ret)
+               goto out;
 
-       ret = -EFAULT;
-       if (copy_to_user(useraddr, &stats, sizeof(stats)))
+       if (copy_to_user(useraddr, &stats, sizeof(stats))) {
+               ret = -EFAULT;
                goto out;
+       }
+
        useraddr += sizeof(stats);
-       if (n_stats && copy_to_user(useraddr, data, array_size(n_stats, sizeof(u64))))
-               goto out;
-       ret = 0;
+       if (copy_to_user(useraddr, data, array_size(stats.n_stats, sizeof(u64))))
+               ret = -EFAULT;
 
  out:
        vfree(data);
index ebe6145aed3f1a79d30d17cb221c55cdcee9134a..be260ab34e5801ab03ad7ad6306f6037ee5c5101 100644 (file)
@@ -122,10 +122,13 @@ rss_fill_reply(struct sk_buff *skb, const struct ethnl_req_info *req_base,
 {
        const struct rss_reply_data *data = RSS_REPDATA(reply_base);
 
-       if (nla_put_u32(skb, ETHTOOL_A_RSS_HFUNC, data->hfunc) ||
-           nla_put(skb, ETHTOOL_A_RSS_INDIR,
-                   sizeof(u32) * data->indir_size, data->indir_table) ||
-           nla_put(skb, ETHTOOL_A_RSS_HKEY, data->hkey_size, data->hkey))
+       if ((data->hfunc &&
+            nla_put_u32(skb, ETHTOOL_A_RSS_HFUNC, data->hfunc)) ||
+           (data->indir_size &&
+            nla_put(skb, ETHTOOL_A_RSS_INDIR,
+                    sizeof(u32) * data->indir_size, data->indir_table)) ||
+           (data->hkey_size &&
+            nla_put(skb, ETHTOOL_A_RSS_HKEY, data->hkey_size, data->hkey)))
                return -EMSGSIZE;
 
        return 0;
index ab4a06be489b5d410cec603bf56248d31dbc90dd..6c0ec27899431eb56e2f9d0c3a936b77f44ccaca 100644 (file)
@@ -1665,6 +1665,7 @@ int inet_ctl_sock_create(struct sock **sk, unsigned short family,
        if (rc == 0) {
                *sk = sock->sk;
                (*sk)->sk_allocation = GFP_ATOMIC;
+               (*sk)->sk_use_task_frag = false;
                /*
                 * Unhash it so that IP input processing does not even see it,
                 * we do not wish this socket to see incoming packets.
index b366ab9148f2d628c75e85752f6d176125deb93c..d1f837579398302f2b3729163c36b12b34e523e0 100644 (file)
@@ -173,22 +173,40 @@ static bool inet_bind_conflict(const struct sock *sk, struct sock *sk2,
        return false;
 }
 
+static bool __inet_bhash2_conflict(const struct sock *sk, struct sock *sk2,
+                                  kuid_t sk_uid, bool relax,
+                                  bool reuseport_cb_ok, bool reuseport_ok)
+{
+       if (sk->sk_family == AF_INET && ipv6_only_sock(sk2))
+               return false;
+
+       return inet_bind_conflict(sk, sk2, sk_uid, relax,
+                                 reuseport_cb_ok, reuseport_ok);
+}
+
 static bool inet_bhash2_conflict(const struct sock *sk,
                                 const struct inet_bind2_bucket *tb2,
                                 kuid_t sk_uid,
                                 bool relax, bool reuseport_cb_ok,
                                 bool reuseport_ok)
 {
+       struct inet_timewait_sock *tw2;
        struct sock *sk2;
 
        sk_for_each_bound_bhash2(sk2, &tb2->owners) {
-               if (sk->sk_family == AF_INET && ipv6_only_sock(sk2))
-                       continue;
+               if (__inet_bhash2_conflict(sk, sk2, sk_uid, relax,
+                                          reuseport_cb_ok, reuseport_ok))
+                       return true;
+       }
 
-               if (inet_bind_conflict(sk, sk2, sk_uid, relax,
-                                      reuseport_cb_ok, reuseport_ok))
+       twsk_for_each_bound_bhash2(tw2, &tb2->deathrow) {
+               sk2 = (struct sock *)tw2;
+
+               if (__inet_bhash2_conflict(sk, sk2, sk_uid, relax,
+                                          reuseport_cb_ok, reuseport_ok))
                        return true;
        }
+
        return false;
 }
 
@@ -1182,12 +1200,26 @@ void inet_csk_prepare_forced_close(struct sock *sk)
 }
 EXPORT_SYMBOL(inet_csk_prepare_forced_close);
 
+static int inet_ulp_can_listen(const struct sock *sk)
+{
+       const struct inet_connection_sock *icsk = inet_csk(sk);
+
+       if (icsk->icsk_ulp_ops && !icsk->icsk_ulp_ops->clone)
+               return -EINVAL;
+
+       return 0;
+}
+
 int inet_csk_listen_start(struct sock *sk)
 {
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct inet_sock *inet = inet_sk(sk);
        int err;
 
+       err = inet_ulp_can_listen(sk);
+       if (unlikely(err))
+               return err;
+
        reqsk_queue_alloc(&icsk->icsk_accept_queue);
 
        sk->sk_ack_backlog = 0;
index d039b4e732a313a0075519ee1c65b23bf9f6d4fe..f58d73888638b916904083af542a1e921fc0467a 100644 (file)
@@ -116,6 +116,7 @@ static void inet_bind2_bucket_init(struct inet_bind2_bucket *tb,
 #endif
                tb->rcv_saddr = sk->sk_rcv_saddr;
        INIT_HLIST_HEAD(&tb->owners);
+       INIT_HLIST_HEAD(&tb->deathrow);
        hlist_add_head(&tb->node, &head->chain);
 }
 
@@ -137,7 +138,7 @@ struct inet_bind2_bucket *inet_bind2_bucket_create(struct kmem_cache *cachep,
 /* Caller must hold hashbucket lock for this tb with local BH disabled */
 void inet_bind2_bucket_destroy(struct kmem_cache *cachep, struct inet_bind2_bucket *tb)
 {
-       if (hlist_empty(&tb->owners)) {
+       if (hlist_empty(&tb->owners) && hlist_empty(&tb->deathrow)) {
                __hlist_del(&tb->node);
                kmem_cache_free(cachep, tb);
        }
@@ -649,8 +650,20 @@ bool inet_ehash_insert(struct sock *sk, struct sock *osk, bool *found_dup_sk)
        spin_lock(lock);
        if (osk) {
                WARN_ON_ONCE(sk->sk_hash != osk->sk_hash);
-               ret = sk_nulls_del_node_init_rcu(osk);
-       } else if (found_dup_sk) {
+               ret = sk_hashed(osk);
+               if (ret) {
+                       /* Before deleting the node, we insert a new one to make
+                        * sure that the look-up-sk process would not miss either
+                        * of them and that at least one node would exist in ehash
+                        * table all the time. Otherwise there's a tiny chance
+                        * that lookup process could find nothing in ehash table.
+                        */
+                       __sk_nulls_add_node_tail_rcu(sk, list);
+                       sk_nulls_del_node_init_rcu(osk);
+               }
+               goto unlock;
+       }
+       if (found_dup_sk) {
                *found_dup_sk = inet_ehash_lookup_by_sk(sk, list);
                if (*found_dup_sk)
                        ret = false;
@@ -659,6 +672,7 @@ bool inet_ehash_insert(struct sock *sk, struct sock *osk, bool *found_dup_sk)
        if (ret)
                __sk_nulls_add_node_rcu(sk, list);
 
+unlock:
        spin_unlock(lock);
 
        return ret;
@@ -1103,15 +1117,16 @@ ok:
        /* Head lock still held and bh's disabled */
        inet_bind_hash(sk, tb, tb2, port);
 
-       spin_unlock(&head2->lock);
-
        if (sk_unhashed(sk)) {
                inet_sk(sk)->inet_sport = htons(port);
                inet_ehash_nolisten(sk, (struct sock *)tw, NULL);
        }
        if (tw)
                inet_twsk_bind_unhash(tw, hinfo);
+
+       spin_unlock(&head2->lock);
        spin_unlock(&head->lock);
+
        if (tw)
                inet_twsk_deschedule_put(tw);
        local_bh_enable();
index 66fc940f9521abb5c9f83b70cdb482afa1ea6ede..beed32fff484183f21e9d20834d67474e4565076 100644 (file)
@@ -29,6 +29,7 @@
 void inet_twsk_bind_unhash(struct inet_timewait_sock *tw,
                          struct inet_hashinfo *hashinfo)
 {
+       struct inet_bind2_bucket *tb2 = tw->tw_tb2;
        struct inet_bind_bucket *tb = tw->tw_tb;
 
        if (!tb)
@@ -37,6 +38,11 @@ void inet_twsk_bind_unhash(struct inet_timewait_sock *tw,
        __hlist_del(&tw->tw_bind_node);
        tw->tw_tb = NULL;
        inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb);
+
+       __hlist_del(&tw->tw_bind2_node);
+       tw->tw_tb2 = NULL;
+       inet_bind2_bucket_destroy(hashinfo->bind2_bucket_cachep, tb2);
+
        __sock_put((struct sock *)tw);
 }
 
@@ -45,7 +51,7 @@ static void inet_twsk_kill(struct inet_timewait_sock *tw)
 {
        struct inet_hashinfo *hashinfo = tw->tw_dr->hashinfo;
        spinlock_t *lock = inet_ehash_lockp(hashinfo, tw->tw_hash);
-       struct inet_bind_hashbucket *bhead;
+       struct inet_bind_hashbucket *bhead, *bhead2;
 
        spin_lock(lock);
        sk_nulls_del_node_init_rcu((struct sock *)tw);
@@ -54,9 +60,13 @@ static void inet_twsk_kill(struct inet_timewait_sock *tw)
        /* Disassociate with bind bucket. */
        bhead = &hashinfo->bhash[inet_bhashfn(twsk_net(tw), tw->tw_num,
                        hashinfo->bhash_size)];
+       bhead2 = inet_bhashfn_portaddr(hashinfo, (struct sock *)tw,
+                                      twsk_net(tw), tw->tw_num);
 
        spin_lock(&bhead->lock);
+       spin_lock(&bhead2->lock);
        inet_twsk_bind_unhash(tw, hashinfo);
+       spin_unlock(&bhead2->lock);
        spin_unlock(&bhead->lock);
 
        refcount_dec(&tw->tw_dr->tw_refcount);
@@ -81,10 +91,10 @@ void inet_twsk_put(struct inet_timewait_sock *tw)
 }
 EXPORT_SYMBOL_GPL(inet_twsk_put);
 
-static void inet_twsk_add_node_rcu(struct inet_timewait_sock *tw,
-                                  struct hlist_nulls_head *list)
+static void inet_twsk_add_node_tail_rcu(struct inet_timewait_sock *tw,
+                                       struct hlist_nulls_head *list)
 {
-       hlist_nulls_add_head_rcu(&tw->tw_node, list);
+       hlist_nulls_add_tail_rcu(&tw->tw_node, list);
 }
 
 static void inet_twsk_add_bind_node(struct inet_timewait_sock *tw,
@@ -93,6 +103,12 @@ static void inet_twsk_add_bind_node(struct inet_timewait_sock *tw,
        hlist_add_head(&tw->tw_bind_node, list);
 }
 
+static void inet_twsk_add_bind2_node(struct inet_timewait_sock *tw,
+                                    struct hlist_head *list)
+{
+       hlist_add_head(&tw->tw_bind2_node, list);
+}
+
 /*
  * Enter the time wait state. This is called with locally disabled BH.
  * Essentially we whip up a timewait bucket, copy the relevant info into it
@@ -105,22 +121,33 @@ void inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
        const struct inet_connection_sock *icsk = inet_csk(sk);
        struct inet_ehash_bucket *ehead = inet_ehash_bucket(hashinfo, sk->sk_hash);
        spinlock_t *lock = inet_ehash_lockp(hashinfo, sk->sk_hash);
-       struct inet_bind_hashbucket *bhead;
+       struct inet_bind_hashbucket *bhead, *bhead2;
+
        /* Step 1: Put TW into bind hash. Original socket stays there too.
           Note, that any socket with inet->num != 0 MUST be bound in
           binding cache, even if it is closed.
         */
        bhead = &hashinfo->bhash[inet_bhashfn(twsk_net(tw), inet->inet_num,
                        hashinfo->bhash_size)];
+       bhead2 = inet_bhashfn_portaddr(hashinfo, sk, twsk_net(tw), inet->inet_num);
+
        spin_lock(&bhead->lock);
+       spin_lock(&bhead2->lock);
+
        tw->tw_tb = icsk->icsk_bind_hash;
        WARN_ON(!icsk->icsk_bind_hash);
        inet_twsk_add_bind_node(tw, &tw->tw_tb->owners);
+
+       tw->tw_tb2 = icsk->icsk_bind2_hash;
+       WARN_ON(!icsk->icsk_bind2_hash);
+       inet_twsk_add_bind2_node(tw, &tw->tw_tb2->deathrow);
+
+       spin_unlock(&bhead2->lock);
        spin_unlock(&bhead->lock);
 
        spin_lock(lock);
 
-       inet_twsk_add_node_rcu(tw, &ehead->chain);
+       inet_twsk_add_node_tail_rcu(tw, &ehead->chain);
 
        /* Step 3: Remove SK from hash chain */
        if (__sk_nulls_del_node_init_rcu(sk))
index c567d5e8053e0c2668196d8a0c9afc6494d8f972..33f559f491c8cfd1a66a6f58c7b4a8a19ffa6b4f 100644 (file)
@@ -435,6 +435,7 @@ void tcp_init_sock(struct sock *sk)
 
        /* There's a bubble in the pipe until at least the first ACK. */
        tp->app_limited = ~0U;
+       tp->rate_app_limited = 1;
 
        /* See draft-stevens-tcpca-spec-01 for discussion of the
         * initialization of these values.
@@ -3178,6 +3179,7 @@ int tcp_disconnect(struct sock *sk, int flags)
        tp->plb_rehash = 0;
        /* There's a bubble in the pipe until at least the first ACK. */
        tp->app_limited = ~0U;
+       tp->rate_app_limited = 1;
        tp->rack.mstamp = 0;
        tp->rack.advanced = 0;
        tp->rack.reo_wnd_steps = 1;
index 9ae50b1bd8444163acedd6f8df70afbb3583bc4b..2aa442128630e5fbc48d72c2f09f7eba475cc62c 100644 (file)
@@ -139,6 +139,10 @@ static int __tcp_set_ulp(struct sock *sk, const struct tcp_ulp_ops *ulp_ops)
        if (sk->sk_socket)
                clear_bit(SOCK_SUPPORT_ZC, &sk->sk_socket->flags);
 
+       err = -ENOTCONN;
+       if (!ulp_ops->clone && sk->sk_state == TCP_LISTEN)
+               goto out_err;
+
        err = ulp_ops->init(sk);
        if (err)
                goto out_err;
index a06a9f847db5c875ea047959cda597ff2d5bcea3..ada087b50541a6f67b9b47f6f98408a96f6a6250 100644 (file)
@@ -505,6 +505,7 @@ csum_copy_err:
 static int rawv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6,
                                     struct raw6_sock *rp)
 {
+       struct ipv6_txoptions *opt;
        struct sk_buff *skb;
        int err = 0;
        int offset;
@@ -522,6 +523,9 @@ static int rawv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6,
 
        offset = rp->offset;
        total_len = inet_sk(sk)->cork.base.length;
+       opt = inet6_sk(sk)->cork.opt;
+       total_len -= opt ? opt->opt_flen : 0;
+
        if (offset >= total_len - 1) {
                err = -EINVAL;
                ip6_flush_pending_frames(sk);
index 9a1415fe3fa78bf419efc86c4a6ccfc7e0f0c78b..03608d3ded4b83d1e59e064e482f54cffcdf5240 100644 (file)
@@ -104,9 +104,9 @@ static struct workqueue_struct *l2tp_wq;
 /* per-net private data for this module */
 static unsigned int l2tp_net_id;
 struct l2tp_net {
-       struct list_head l2tp_tunnel_list;
-       /* Lock for write access to l2tp_tunnel_list */
-       spinlock_t l2tp_tunnel_list_lock;
+       /* Lock for write access to l2tp_tunnel_idr */
+       spinlock_t l2tp_tunnel_idr_lock;
+       struct idr l2tp_tunnel_idr;
        struct hlist_head l2tp_session_hlist[L2TP_HASH_SIZE_2];
        /* Lock for write access to l2tp_session_hlist */
        spinlock_t l2tp_session_hlist_lock;
@@ -208,13 +208,10 @@ struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id)
        struct l2tp_tunnel *tunnel;
 
        rcu_read_lock_bh();
-       list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
-               if (tunnel->tunnel_id == tunnel_id &&
-                   refcount_inc_not_zero(&tunnel->ref_count)) {
-                       rcu_read_unlock_bh();
-
-                       return tunnel;
-               }
+       tunnel = idr_find(&pn->l2tp_tunnel_idr, tunnel_id);
+       if (tunnel && refcount_inc_not_zero(&tunnel->ref_count)) {
+               rcu_read_unlock_bh();
+               return tunnel;
        }
        rcu_read_unlock_bh();
 
@@ -224,13 +221,14 @@ EXPORT_SYMBOL_GPL(l2tp_tunnel_get);
 
 struct l2tp_tunnel *l2tp_tunnel_get_nth(const struct net *net, int nth)
 {
-       const struct l2tp_net *pn = l2tp_pernet(net);
+       struct l2tp_net *pn = l2tp_pernet(net);
+       unsigned long tunnel_id, tmp;
        struct l2tp_tunnel *tunnel;
        int count = 0;
 
        rcu_read_lock_bh();
-       list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
-               if (++count > nth &&
+       idr_for_each_entry_ul(&pn->l2tp_tunnel_idr, tunnel, tmp, tunnel_id) {
+               if (tunnel && ++count > nth &&
                    refcount_inc_not_zero(&tunnel->ref_count)) {
                        rcu_read_unlock_bh();
                        return tunnel;
@@ -1043,7 +1041,7 @@ static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb, uns
        IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED | IPSKB_REROUTED);
        nf_reset_ct(skb);
 
-       bh_lock_sock(sk);
+       bh_lock_sock_nested(sk);
        if (sock_owned_by_user(sk)) {
                kfree_skb(skb);
                ret = NET_XMIT_DROP;
@@ -1227,6 +1225,15 @@ static void l2tp_udp_encap_destroy(struct sock *sk)
                l2tp_tunnel_delete(tunnel);
 }
 
+static void l2tp_tunnel_remove(struct net *net, struct l2tp_tunnel *tunnel)
+{
+       struct l2tp_net *pn = l2tp_pernet(net);
+
+       spin_lock_bh(&pn->l2tp_tunnel_idr_lock);
+       idr_remove(&pn->l2tp_tunnel_idr, tunnel->tunnel_id);
+       spin_unlock_bh(&pn->l2tp_tunnel_idr_lock);
+}
+
 /* Workqueue tunnel deletion function */
 static void l2tp_tunnel_del_work(struct work_struct *work)
 {
@@ -1234,7 +1241,6 @@ static void l2tp_tunnel_del_work(struct work_struct *work)
                                                  del_work);
        struct sock *sk = tunnel->sock;
        struct socket *sock = sk->sk_socket;
-       struct l2tp_net *pn;
 
        l2tp_tunnel_closeall(tunnel);
 
@@ -1248,12 +1254,7 @@ static void l2tp_tunnel_del_work(struct work_struct *work)
                }
        }
 
-       /* Remove the tunnel struct from the tunnel list */
-       pn = l2tp_pernet(tunnel->l2tp_net);
-       spin_lock_bh(&pn->l2tp_tunnel_list_lock);
-       list_del_rcu(&tunnel->list);
-       spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
-
+       l2tp_tunnel_remove(tunnel->l2tp_net, tunnel);
        /* drop initial ref */
        l2tp_tunnel_dec_refcount(tunnel);
 
@@ -1384,8 +1385,6 @@ out:
        return err;
 }
 
-static struct lock_class_key l2tp_socket_class;
-
 int l2tp_tunnel_create(int fd, int version, u32 tunnel_id, u32 peer_tunnel_id,
                       struct l2tp_tunnel_cfg *cfg, struct l2tp_tunnel **tunnelp)
 {
@@ -1455,12 +1454,19 @@ static int l2tp_validate_socket(const struct sock *sk, const struct net *net,
 int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net,
                         struct l2tp_tunnel_cfg *cfg)
 {
-       struct l2tp_tunnel *tunnel_walk;
-       struct l2tp_net *pn;
+       struct l2tp_net *pn = l2tp_pernet(net);
+       u32 tunnel_id = tunnel->tunnel_id;
        struct socket *sock;
        struct sock *sk;
        int ret;
 
+       spin_lock_bh(&pn->l2tp_tunnel_idr_lock);
+       ret = idr_alloc_u32(&pn->l2tp_tunnel_idr, NULL, &tunnel_id, tunnel_id,
+                           GFP_ATOMIC);
+       spin_unlock_bh(&pn->l2tp_tunnel_idr_lock);
+       if (ret)
+               return ret == -ENOSPC ? -EEXIST : ret;
+
        if (tunnel->fd < 0) {
                ret = l2tp_tunnel_sock_create(net, tunnel->tunnel_id,
                                              tunnel->peer_tunnel_id, cfg,
@@ -1474,6 +1480,7 @@ int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net,
        }
 
        sk = sock->sk;
+       lock_sock(sk);
        write_lock_bh(&sk->sk_callback_lock);
        ret = l2tp_validate_socket(sk, net, tunnel->encap);
        if (ret < 0)
@@ -1481,24 +1488,6 @@ int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net,
        rcu_assign_sk_user_data(sk, tunnel);
        write_unlock_bh(&sk->sk_callback_lock);
 
-       tunnel->l2tp_net = net;
-       pn = l2tp_pernet(net);
-
-       sock_hold(sk);
-       tunnel->sock = sk;
-
-       spin_lock_bh(&pn->l2tp_tunnel_list_lock);
-       list_for_each_entry(tunnel_walk, &pn->l2tp_tunnel_list, list) {
-               if (tunnel_walk->tunnel_id == tunnel->tunnel_id) {
-                       spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
-                       sock_put(sk);
-                       ret = -EEXIST;
-                       goto err_sock;
-               }
-       }
-       list_add_rcu(&tunnel->list, &pn->l2tp_tunnel_list);
-       spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
-
        if (tunnel->encap == L2TP_ENCAPTYPE_UDP) {
                struct udp_tunnel_sock_cfg udp_cfg = {
                        .sk_user_data = tunnel,
@@ -1512,9 +1501,16 @@ int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net,
 
        tunnel->old_sk_destruct = sk->sk_destruct;
        sk->sk_destruct = &l2tp_tunnel_destruct;
-       lockdep_set_class_and_name(&sk->sk_lock.slock, &l2tp_socket_class,
-                                  "l2tp_sock");
        sk->sk_allocation = GFP_ATOMIC;
+       release_sock(sk);
+
+       sock_hold(sk);
+       tunnel->sock = sk;
+       tunnel->l2tp_net = net;
+
+       spin_lock_bh(&pn->l2tp_tunnel_idr_lock);
+       idr_replace(&pn->l2tp_tunnel_idr, tunnel, tunnel->tunnel_id);
+       spin_unlock_bh(&pn->l2tp_tunnel_idr_lock);
 
        trace_register_tunnel(tunnel);
 
@@ -1523,17 +1519,16 @@ int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net,
 
        return 0;
 
-err_sock:
-       write_lock_bh(&sk->sk_callback_lock);
-       rcu_assign_sk_user_data(sk, NULL);
 err_inval_sock:
        write_unlock_bh(&sk->sk_callback_lock);
+       release_sock(sk);
 
        if (tunnel->fd < 0)
                sock_release(sock);
        else
                sockfd_put(sock);
 err:
+       l2tp_tunnel_remove(net, tunnel);
        return ret;
 }
 EXPORT_SYMBOL_GPL(l2tp_tunnel_register);
@@ -1647,8 +1642,8 @@ static __net_init int l2tp_init_net(struct net *net)
        struct l2tp_net *pn = net_generic(net, l2tp_net_id);
        int hash;
 
-       INIT_LIST_HEAD(&pn->l2tp_tunnel_list);
-       spin_lock_init(&pn->l2tp_tunnel_list_lock);
+       idr_init(&pn->l2tp_tunnel_idr);
+       spin_lock_init(&pn->l2tp_tunnel_idr_lock);
 
        for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++)
                INIT_HLIST_HEAD(&pn->l2tp_session_hlist[hash]);
@@ -1662,11 +1657,13 @@ static __net_exit void l2tp_exit_net(struct net *net)
 {
        struct l2tp_net *pn = l2tp_pernet(net);
        struct l2tp_tunnel *tunnel = NULL;
+       unsigned long tunnel_id, tmp;
        int hash;
 
        rcu_read_lock_bh();
-       list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
-               l2tp_tunnel_delete(tunnel);
+       idr_for_each_entry_ul(&pn->l2tp_tunnel_idr, tunnel, tmp, tunnel_id) {
+               if (tunnel)
+                       l2tp_tunnel_delete(tunnel);
        }
        rcu_read_unlock_bh();
 
@@ -1676,6 +1673,7 @@ static __net_exit void l2tp_exit_net(struct net *net)
 
        for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++)
                WARN_ON_ONCE(!hlist_empty(&pn->l2tp_session_hlist[hash]));
+       idr_destroy(&pn->l2tp_tunnel_idr);
 }
 
 static struct pernet_operations l2tp_net_ops = {
index 9c40f8d3bce8cbb3bad8710b8f889a4ed02b3a21..f9514bacbd4a184d677226ce65fd7a0635fcb5d1 100644 (file)
@@ -491,7 +491,7 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
 {
        struct tid_ampdu_tx *tid_tx;
        struct ieee80211_local *local = sta->local;
-       struct ieee80211_sub_if_data *sdata = sta->sdata;
+       struct ieee80211_sub_if_data *sdata;
        struct ieee80211_ampdu_params params = {
                .sta = &sta->sta,
                .action = IEEE80211_AMPDU_TX_START,
@@ -511,8 +511,6 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
         */
        clear_bit(HT_AGG_STATE_WANT_START, &tid_tx->state);
 
-       ieee80211_agg_stop_txq(sta, tid);
-
        /*
         * Make sure no packets are being processed. This ensures that
         * we have a valid starting sequence number and that in-flight
@@ -521,6 +519,7 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
         */
        synchronize_net();
 
+       sdata = sta->sdata;
        params.ssn = sta->tid_seq[tid] >> 4;
        ret = drv_ampdu_action(local, sdata, &params);
        tid_tx->ssn = params.ssn;
@@ -534,6 +533,9 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
                 */
                set_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state);
        } else if (ret) {
+               if (!sdata)
+                       return;
+
                ht_dbg(sdata,
                       "BA request denied - HW unavailable for %pM tid %d\n",
                       sta->sta.addr, tid);
index 8f9a2ab502b38e9f95ae954b3d087acd7e9a1456..672eff6f5d328de2d6e09cf9909f5e1923dbc8a2 100644 (file)
@@ -147,6 +147,7 @@ static int ieee80211_set_ap_mbssid_options(struct ieee80211_sub_if_data *sdata,
        link_conf->bssid_index = 0;
        link_conf->nontransmitted = false;
        link_conf->ema_ap = false;
+       link_conf->bssid_indicator = 0;
 
        if (sdata->vif.type != NL80211_IFTYPE_AP || !params.tx_wdev)
                return -EINVAL;
@@ -1511,6 +1512,12 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev,
        kfree(link_conf->ftmr_params);
        link_conf->ftmr_params = NULL;
 
+       sdata->vif.mbssid_tx_vif = NULL;
+       link_conf->bssid_index = 0;
+       link_conf->nontransmitted = false;
+       link_conf->ema_ap = false;
+       link_conf->bssid_indicator = 0;
+
        __sta_info_flush(sdata, true);
        ieee80211_free_keys(sdata, true);
 
index 7a3d7893e19d6a607c0c1a20ecec6683c6d51b38..f1914bf39f0e61f37c623eccaf9e62d3ab39b528 100644 (file)
@@ -167,7 +167,7 @@ static ssize_t sta_aqm_read(struct file *file, char __user *userbuf,
                        continue;
                txqi = to_txq_info(sta->sta.txq[i]);
                p += scnprintf(p, bufsz + buf - p,
-                              "%d %d %u %u %u %u %u %u %u %u %u 0x%lx(%s%s%s)\n",
+                              "%d %d %u %u %u %u %u %u %u %u %u 0x%lx(%s%s%s%s)\n",
                               txqi->txq.tid,
                               txqi->txq.ac,
                               txqi->tin.backlog_bytes,
@@ -182,7 +182,8 @@ static ssize_t sta_aqm_read(struct file *file, char __user *userbuf,
                               txqi->flags,
                               test_bit(IEEE80211_TXQ_STOP, &txqi->flags) ? "STOP" : "RUN",
                               test_bit(IEEE80211_TXQ_AMPDU, &txqi->flags) ? " AMPDU" : "",
-                              test_bit(IEEE80211_TXQ_NO_AMSDU, &txqi->flags) ? " NO-AMSDU" : "");
+                              test_bit(IEEE80211_TXQ_NO_AMSDU, &txqi->flags) ? " NO-AMSDU" : "",
+                              test_bit(IEEE80211_TXQ_DIRTY, &txqi->flags) ? " DIRTY" : "");
        }
 
        rcu_read_unlock();
index d737db4e07e2436de3b47ccb98be943ad7afc1c5..cfb09e4aed4d3e8abd47bb78e05fbbedf4f42df2 100644 (file)
@@ -392,6 +392,9 @@ int drv_ampdu_action(struct ieee80211_local *local,
 
        might_sleep();
 
+       if (!sdata)
+               return -EIO;
+
        sdata = get_bss_sdata(sdata);
        if (!check_sdata_in_driver(sdata))
                return -EIO;
index 809bad53e15b67d63c9d953aacc60e16f2fa57d6..5d13a3dfd3664bb3820b4a9baa42b23f087d95de 100644 (file)
@@ -1199,7 +1199,7 @@ static inline void drv_wake_tx_queue(struct ieee80211_local *local,
 
        /* In reconfig don't transmit now, but mark for waking later */
        if (local->in_reconfig) {
-               set_bit(IEEE80211_TXQ_STOP_NETIF_TX, &txq->flags);
+               set_bit(IEEE80211_TXQ_DIRTY, &txq->flags);
                return;
        }
 
index 83bc41346ae7fe110d5cfa164cf8f4276579fe26..5315ab750280278787e462f8a5861013d2c42f4d 100644 (file)
@@ -391,6 +391,37 @@ void ieee80211_ba_session_work(struct work_struct *work)
 
                tid_tx = sta->ampdu_mlme.tid_start_tx[tid];
                if (!blocked && tid_tx) {
+                       struct txq_info *txqi = to_txq_info(sta->sta.txq[tid]);
+                       struct ieee80211_sub_if_data *sdata =
+                               vif_to_sdata(txqi->txq.vif);
+                       struct fq *fq = &sdata->local->fq;
+
+                       spin_lock_bh(&fq->lock);
+
+                       /* Allow only frags to be dequeued */
+                       set_bit(IEEE80211_TXQ_STOP, &txqi->flags);
+
+                       if (!skb_queue_empty(&txqi->frags)) {
+                               /* Fragmented Tx is ongoing, wait for it to
+                                * finish. Reschedule worker to retry later.
+                                */
+
+                               spin_unlock_bh(&fq->lock);
+                               spin_unlock_bh(&sta->lock);
+
+                               /* Give the task working on the txq a chance
+                                * to send out the queued frags
+                                */
+                               synchronize_net();
+
+                               mutex_unlock(&sta->ampdu_mlme.mtx);
+
+                               ieee80211_queue_work(&sdata->local->hw, work);
+                               return;
+                       }
+
+                       spin_unlock_bh(&fq->lock);
+
                        /*
                         * Assign it over to the normal tid_tx array
                         * where it "goes live".
index 63ff0d2524b6530bcd0b27649ecf4492d70ea10f..d16606e84e22de34e47945cd5f1376b2639c4b70 100644 (file)
@@ -838,7 +838,7 @@ enum txq_info_flags {
        IEEE80211_TXQ_STOP,
        IEEE80211_TXQ_AMPDU,
        IEEE80211_TXQ_NO_AMSDU,
-       IEEE80211_TXQ_STOP_NETIF_TX,
+       IEEE80211_TXQ_DIRTY,
 };
 
 /**
index d49a5906a94351f37644a3ccca4eba90379ee591..23ed13f150675d1ffa869796c857f5905fb8dae8 100644 (file)
@@ -364,7 +364,9 @@ static int ieee80211_check_concurrent_iface(struct ieee80211_sub_if_data *sdata,
 
                        /* No support for VLAN with MLO yet */
                        if (iftype == NL80211_IFTYPE_AP_VLAN &&
-                           nsdata->wdev.use_4addr)
+                           sdata->wdev.use_4addr &&
+                           nsdata->vif.type == NL80211_IFTYPE_AP &&
+                           nsdata->vif.valid_links)
                                return -EOPNOTSUPP;
 
                        /*
@@ -2195,7 +2197,6 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
 
                ret = cfg80211_register_netdevice(ndev);
                if (ret) {
-                       ieee80211_if_free(ndev);
                        free_netdev(ndev);
                        return ret;
                }
index 7e3ab6e1b28f3d5c4547e97fbbabc42a2e83adbc..c6562a6d25035765a4c0d7cb353da894afbbbaed 100644 (file)
@@ -4049,6 +4049,58 @@ static void ieee80211_invoke_rx_handlers(struct ieee80211_rx_data *rx)
 #undef CALL_RXH
 }
 
+static bool
+ieee80211_rx_is_valid_sta_link_id(struct ieee80211_sta *sta, u8 link_id)
+{
+       if (!sta->mlo)
+               return false;
+
+       return !!(sta->valid_links & BIT(link_id));
+}
+
+static bool ieee80211_rx_data_set_link(struct ieee80211_rx_data *rx,
+                                      u8 link_id)
+{
+       rx->link_id = link_id;
+       rx->link = rcu_dereference(rx->sdata->link[link_id]);
+
+       if (!rx->sta)
+               return rx->link;
+
+       if (!ieee80211_rx_is_valid_sta_link_id(&rx->sta->sta, link_id))
+               return false;
+
+       rx->link_sta = rcu_dereference(rx->sta->link[link_id]);
+
+       return rx->link && rx->link_sta;
+}
+
+static bool ieee80211_rx_data_set_sta(struct ieee80211_rx_data *rx,
+                                     struct ieee80211_sta *pubsta,
+                                     int link_id)
+{
+       struct sta_info *sta;
+
+       sta = container_of(pubsta, struct sta_info, sta);
+
+       rx->link_id = link_id;
+       rx->sta = sta;
+
+       if (sta) {
+               rx->local = sta->sdata->local;
+               if (!rx->sdata)
+                       rx->sdata = sta->sdata;
+               rx->link_sta = &sta->deflink;
+       }
+
+       if (link_id < 0)
+               rx->link = &rx->sdata->deflink;
+       else if (!ieee80211_rx_data_set_link(rx, link_id))
+               return false;
+
+       return true;
+}
+
 /*
  * This function makes calls into the RX path, therefore
  * it has to be invoked under RCU read lock.
@@ -4057,16 +4109,19 @@ void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid)
 {
        struct sk_buff_head frames;
        struct ieee80211_rx_data rx = {
-               .sta = sta,
-               .sdata = sta->sdata,
-               .local = sta->local,
                /* This is OK -- must be QoS data frame */
                .security_idx = tid,
                .seqno_idx = tid,
-               .link_id = -1,
        };
        struct tid_ampdu_rx *tid_agg_rx;
-       u8 link_id;
+       int link_id = -1;
+
+       /* FIXME: statistics won't be right with this */
+       if (sta->sta.valid_links)
+               link_id = ffs(sta->sta.valid_links) - 1;
+
+       if (!ieee80211_rx_data_set_sta(&rx, &sta->sta, link_id))
+               return;
 
        tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
        if (!tid_agg_rx)
@@ -4086,10 +4141,6 @@ void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid)
                };
                drv_event_callback(rx.local, rx.sdata, &event);
        }
-       /* FIXME: statistics won't be right with this */
-       link_id = sta->sta.valid_links ? ffs(sta->sta.valid_links) - 1 : 0;
-       rx.link = rcu_dereference(sta->sdata->link[link_id]);
-       rx.link_sta = rcu_dereference(sta->link[link_id]);
 
        ieee80211_rx_handlers(&rx, &frames);
 }
@@ -4105,7 +4156,6 @@ void ieee80211_mark_rx_ba_filtered_frames(struct ieee80211_sta *pubsta, u8 tid,
                /* This is OK -- must be QoS data frame */
                .security_idx = tid,
                .seqno_idx = tid,
-               .link_id = -1,
        };
        int i, diff;
 
@@ -4116,10 +4166,8 @@ void ieee80211_mark_rx_ba_filtered_frames(struct ieee80211_sta *pubsta, u8 tid,
 
        sta = container_of(pubsta, struct sta_info, sta);
 
-       rx.sta = sta;
-       rx.sdata = sta->sdata;
-       rx.link = &rx.sdata->deflink;
-       rx.local = sta->local;
+       if (!ieee80211_rx_data_set_sta(&rx, pubsta, -1))
+               return;
 
        rcu_read_lock();
        tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
@@ -4506,15 +4554,6 @@ void ieee80211_check_fast_rx_iface(struct ieee80211_sub_if_data *sdata)
        mutex_unlock(&local->sta_mtx);
 }
 
-static bool
-ieee80211_rx_is_valid_sta_link_id(struct ieee80211_sta *sta, u8 link_id)
-{
-       if (!sta->mlo)
-               return false;
-
-       return !!(sta->valid_links & BIT(link_id));
-}
-
 static void ieee80211_rx_8023(struct ieee80211_rx_data *rx,
                              struct ieee80211_fast_rx *fast_rx,
                              int orig_len)
@@ -4625,7 +4664,6 @@ static bool ieee80211_invoke_fast_rx(struct ieee80211_rx_data *rx,
        struct sk_buff *skb = rx->skb;
        struct ieee80211_hdr *hdr = (void *)skb->data;
        struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
-       struct sta_info *sta = rx->sta;
        int orig_len = skb->len;
        int hdrlen = ieee80211_hdrlen(hdr->frame_control);
        int snap_offs = hdrlen;
@@ -4637,7 +4675,6 @@ static bool ieee80211_invoke_fast_rx(struct ieee80211_rx_data *rx,
                u8 da[ETH_ALEN];
                u8 sa[ETH_ALEN];
        } addrs __aligned(2);
-       struct link_sta_info *link_sta;
        struct ieee80211_sta_rx_stats *stats;
 
        /* for parallel-rx, we need to have DUP_VALIDATED, otherwise we write
@@ -4740,18 +4777,10 @@ static bool ieee80211_invoke_fast_rx(struct ieee80211_rx_data *rx,
  drop:
        dev_kfree_skb(skb);
 
-       if (rx->link_id >= 0) {
-               link_sta = rcu_dereference(sta->link[rx->link_id]);
-               if (!link_sta)
-                       return true;
-       } else {
-               link_sta = &sta->deflink;
-       }
-
        if (fast_rx->uses_rss)
-               stats = this_cpu_ptr(link_sta->pcpu_rx_stats);
+               stats = this_cpu_ptr(rx->link_sta->pcpu_rx_stats);
        else
-               stats = &link_sta->rx_stats;
+               stats = &rx->link_sta->rx_stats;
 
        stats->dropped++;
        return true;
@@ -4769,8 +4798,8 @@ static bool ieee80211_prepare_and_rx_handle(struct ieee80211_rx_data *rx,
        struct ieee80211_local *local = rx->local;
        struct ieee80211_sub_if_data *sdata = rx->sdata;
        struct ieee80211_hdr *hdr = (void *)skb->data;
-       struct link_sta_info *link_sta = NULL;
-       struct ieee80211_link_data *link;
+       struct link_sta_info *link_sta = rx->link_sta;
+       struct ieee80211_link_data *link = rx->link;
 
        rx->skb = skb;
 
@@ -4792,35 +4821,6 @@ static bool ieee80211_prepare_and_rx_handle(struct ieee80211_rx_data *rx,
        if (!ieee80211_accept_frame(rx))
                return false;
 
-       if (rx->link_id >= 0) {
-               link = rcu_dereference(rx->sdata->link[rx->link_id]);
-
-               /* we might race link removal */
-               if (!link)
-                       return true;
-               rx->link = link;
-
-               if (rx->sta) {
-                       rx->link_sta =
-                               rcu_dereference(rx->sta->link[rx->link_id]);
-                       if (!rx->link_sta)
-                               return true;
-               }
-       } else {
-               if (rx->sta)
-                       rx->link_sta = &rx->sta->deflink;
-
-               rx->link = &sdata->deflink;
-       }
-
-       if (unlikely(!is_multicast_ether_addr(hdr->addr1) &&
-                    rx->link_id >= 0 && rx->sta && rx->sta->sta.mlo)) {
-               link_sta = rcu_dereference(rx->sta->link[rx->link_id]);
-
-               if (WARN_ON_ONCE(!link_sta))
-                       return true;
-       }
-
        if (!consume) {
                struct skb_shared_hwtstamps *shwt;
 
@@ -4838,9 +4838,12 @@ static bool ieee80211_prepare_and_rx_handle(struct ieee80211_rx_data *rx,
                 */
                shwt = skb_hwtstamps(rx->skb);
                shwt->hwtstamp = skb_hwtstamps(skb)->hwtstamp;
+
+               /* Update the hdr pointer to the new skb for translation below */
+               hdr = (struct ieee80211_hdr *)rx->skb->data;
        }
 
-       if (unlikely(link_sta)) {
+       if (unlikely(rx->sta && rx->sta->sta.mlo)) {
                /* translate to MLD addresses */
                if (ether_addr_equal(link->conf->addr, hdr->addr1))
                        ether_addr_copy(hdr->addr1, rx->sdata->vif.addr);
@@ -4870,6 +4873,7 @@ static void __ieee80211_rx_handle_8023(struct ieee80211_hw *hw,
        struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
        struct ieee80211_fast_rx *fast_rx;
        struct ieee80211_rx_data rx;
+       int link_id = -1;
 
        memset(&rx, 0, sizeof(rx));
        rx.skb = skb;
@@ -4886,12 +4890,8 @@ static void __ieee80211_rx_handle_8023(struct ieee80211_hw *hw,
        if (!pubsta)
                goto drop;
 
-       rx.sta = container_of(pubsta, struct sta_info, sta);
-       rx.sdata = rx.sta->sdata;
-
-       if (status->link_valid &&
-           !ieee80211_rx_is_valid_sta_link_id(pubsta, status->link_id))
-               goto drop;
+       if (status->link_valid)
+               link_id = status->link_id;
 
        /*
         * TODO: Should the frame be dropped if the right link_id is not
@@ -4900,19 +4900,8 @@ static void __ieee80211_rx_handle_8023(struct ieee80211_hw *hw,
         * link_id is used only for stats purpose and updating the stats on
         * the deflink is fine?
         */
-       if (status->link_valid)
-               rx.link_id = status->link_id;
-
-       if (rx.link_id >= 0) {
-               struct ieee80211_link_data *link;
-
-               link =  rcu_dereference(rx.sdata->link[rx.link_id]);
-               if (!link)
-                       goto drop;
-               rx.link = link;
-       } else {
-               rx.link = &rx.sdata->deflink;
-       }
+       if (!ieee80211_rx_data_set_sta(&rx, pubsta, link_id))
+               goto drop;
 
        fast_rx = rcu_dereference(rx.sta->fast_rx);
        if (!fast_rx)
@@ -4930,6 +4919,8 @@ static bool ieee80211_rx_for_interface(struct ieee80211_rx_data *rx,
 {
        struct link_sta_info *link_sta;
        struct ieee80211_hdr *hdr = (void *)skb->data;
+       struct sta_info *sta;
+       int link_id = -1;
 
        /*
         * Look up link station first, in case there's a
@@ -4939,24 +4930,19 @@ static bool ieee80211_rx_for_interface(struct ieee80211_rx_data *rx,
         */
        link_sta = link_sta_info_get_bss(rx->sdata, hdr->addr2);
        if (link_sta) {
-               rx->sta = link_sta->sta;
-               rx->link_id = link_sta->link_id;
+               sta = link_sta->sta;
+               link_id = link_sta->link_id;
        } else {
                struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
 
-               rx->sta = sta_info_get_bss(rx->sdata, hdr->addr2);
-               if (rx->sta) {
-                       if (status->link_valid &&
-                           !ieee80211_rx_is_valid_sta_link_id(&rx->sta->sta,
-                                                              status->link_id))
-                               return false;
-
-                       rx->link_id = status->link_valid ? status->link_id : -1;
-               } else {
-                       rx->link_id = -1;
-               }
+               sta = sta_info_get_bss(rx->sdata, hdr->addr2);
+               if (status->link_valid)
+                       link_id = status->link_id;
        }
 
+       if (!ieee80211_rx_data_set_sta(rx, &sta->sta, link_id))
+               return false;
+
        return ieee80211_prepare_and_rx_handle(rx, skb, consume);
 }
 
@@ -5015,19 +5001,15 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
 
        if (ieee80211_is_data(fc)) {
                struct sta_info *sta, *prev_sta;
-               u8 link_id = status->link_id;
+               int link_id = -1;
 
-               if (pubsta) {
-                       rx.sta = container_of(pubsta, struct sta_info, sta);
-                       rx.sdata = rx.sta->sdata;
+               if (status->link_valid)
+                       link_id = status->link_id;
 
-                       if (status->link_valid &&
-                           !ieee80211_rx_is_valid_sta_link_id(pubsta, link_id))
+               if (pubsta) {
+                       if (!ieee80211_rx_data_set_sta(&rx, pubsta, link_id))
                                goto out;
 
-                       if (status->link_valid)
-                               rx.link_id = status->link_id;
-
                        /*
                         * In MLO connection, fetch the link_id using addr2
                         * when the driver does not pass link_id in status.
@@ -5045,7 +5027,7 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
                                if (!link_sta)
                                        goto out;
 
-                               rx.link_id = link_sta->link_id;
+                               ieee80211_rx_data_set_link(&rx, link_sta->link_id);
                        }
 
                        if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
@@ -5061,30 +5043,27 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
                                continue;
                        }
 
-                       if ((status->link_valid &&
-                            !ieee80211_rx_is_valid_sta_link_id(&prev_sta->sta,
-                                                               link_id)) ||
-                           (!status->link_valid && prev_sta->sta.mlo))
+                       rx.sdata = prev_sta->sdata;
+                       if (!ieee80211_rx_data_set_sta(&rx, &prev_sta->sta,
+                                                      link_id))
+                               goto out;
+
+                       if (!status->link_valid && prev_sta->sta.mlo)
                                continue;
 
-                       rx.link_id = status->link_valid ? link_id : -1;
-                       rx.sta = prev_sta;
-                       rx.sdata = prev_sta->sdata;
                        ieee80211_prepare_and_rx_handle(&rx, skb, false);
 
                        prev_sta = sta;
                }
 
                if (prev_sta) {
-                       if ((status->link_valid &&
-                            !ieee80211_rx_is_valid_sta_link_id(&prev_sta->sta,
-                                                               link_id)) ||
-                           (!status->link_valid && prev_sta->sta.mlo))
+                       rx.sdata = prev_sta->sdata;
+                       if (!ieee80211_rx_data_set_sta(&rx, &prev_sta->sta,
+                                                      link_id))
                                goto out;
 
-                       rx.link_id = status->link_valid ? link_id : -1;
-                       rx.sta = prev_sta;
-                       rx.sdata = prev_sta->sdata;
+                       if (!status->link_valid && prev_sta->sta.mlo)
+                               goto out;
 
                        if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
                                return;
index 2171cd1ca807e8608f1f00a417f8705391e3d634..defe97a31724d81399d844e2fd337444e445351b 100644 (file)
@@ -1129,7 +1129,6 @@ static bool ieee80211_tx_prep_agg(struct ieee80211_tx_data *tx,
        struct sk_buff *purge_skb = NULL;
 
        if (test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state)) {
-               info->flags |= IEEE80211_TX_CTL_AMPDU;
                reset_agg_timer = true;
        } else if (test_bit(HT_AGG_STATE_WANT_START, &tid_tx->state)) {
                /*
@@ -1161,7 +1160,6 @@ static bool ieee80211_tx_prep_agg(struct ieee80211_tx_data *tx,
                if (!tid_tx) {
                        /* do nothing, let packet pass through */
                } else if (test_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state)) {
-                       info->flags |= IEEE80211_TX_CTL_AMPDU;
                        reset_agg_timer = true;
                } else {
                        queued = true;
@@ -3677,8 +3675,7 @@ static void __ieee80211_xmit_fast(struct ieee80211_sub_if_data *sdata,
        info->band = fast_tx->band;
        info->control.vif = &sdata->vif;
        info->flags = IEEE80211_TX_CTL_FIRST_FRAGMENT |
-                     IEEE80211_TX_CTL_DONTFRAG |
-                     (ampdu ? IEEE80211_TX_CTL_AMPDU : 0);
+                     IEEE80211_TX_CTL_DONTFRAG;
        info->control.flags = IEEE80211_TX_CTRL_FAST_XMIT |
                              u32_encode_bits(IEEE80211_LINK_UNSPECIFIED,
                                              IEEE80211_TX_CTRL_MLO_LINK);
@@ -3783,6 +3780,8 @@ struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw,
        struct ieee80211_tx_data tx;
        ieee80211_tx_result r;
        struct ieee80211_vif *vif = txq->vif;
+       int q = vif->hw_queue[txq->ac];
+       bool q_stopped;
 
        WARN_ON_ONCE(softirq_count() == 0);
 
@@ -3790,17 +3789,18 @@ struct sk_buff *ieee80211_tx_dequeue(struct ieee80211_hw *hw,
                return NULL;
 
 begin:
-       spin_lock_bh(&fq->lock);
-
-       if (test_bit(IEEE80211_TXQ_STOP, &txqi->flags) ||
-           test_bit(IEEE80211_TXQ_STOP_NETIF_TX, &txqi->flags))
-               goto out;
+       spin_lock(&local->queue_stop_reason_lock);
+       q_stopped = local->queue_stop_reasons[q];
+       spin_unlock(&local->queue_stop_reason_lock);
 
-       if (vif->txqs_stopped[txq->ac]) {
-               set_bit(IEEE80211_TXQ_STOP_NETIF_TX, &txqi->flags);
-               goto out;
+       if (unlikely(q_stopped)) {
+               /* mark for waking later */
+               set_bit(IEEE80211_TXQ_DIRTY, &txqi->flags);
+               return NULL;
        }
 
+       spin_lock_bh(&fq->lock);
+
        /* Make sure fragments stay together. */
        skb = __skb_dequeue(&txqi->frags);
        if (unlikely(skb)) {
@@ -3810,6 +3810,9 @@ begin:
                IEEE80211_SKB_CB(skb)->control.flags &=
                        ~IEEE80211_TX_INTCFL_NEED_TXPROCESSING;
        } else {
+               if (unlikely(test_bit(IEEE80211_TXQ_STOP, &txqi->flags)))
+                       goto out;
+
                skb = fq_tin_dequeue(fq, tin, fq_tin_dequeue_func);
        }
 
@@ -3860,9 +3863,8 @@ begin:
        }
 
        if (test_bit(IEEE80211_TXQ_AMPDU, &txqi->flags))
-               info->flags |= IEEE80211_TX_CTL_AMPDU;
-       else
-               info->flags &= ~IEEE80211_TX_CTL_AMPDU;
+               info->flags |= (IEEE80211_TX_CTL_AMPDU |
+                               IEEE80211_TX_CTL_DONTFRAG);
 
        if (info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) {
                if (!ieee80211_hw_check(&local->hw, HAS_RATE_CONTROL)) {
@@ -4596,8 +4598,6 @@ static void ieee80211_8023_xmit(struct ieee80211_sub_if_data *sdata,
 
        info = IEEE80211_SKB_CB(skb);
        memset(info, 0, sizeof(*info));
-       if (tid_tx)
-               info->flags |= IEEE80211_TX_CTL_AMPDU;
 
        info->hw_queue = sdata->vif.hw_queue[queue];
 
index 6f5407038459d03cc3ceaa4ac29a66c3da570a57..261ac667887f88c375038ada1c777ef8b3b26ae3 100644 (file)
@@ -292,22 +292,12 @@ static void wake_tx_push_queue(struct ieee80211_local *local,
                               struct ieee80211_sub_if_data *sdata,
                               struct ieee80211_txq *queue)
 {
-       int q = sdata->vif.hw_queue[queue->ac];
        struct ieee80211_tx_control control = {
                .sta = queue->sta,
        };
        struct sk_buff *skb;
-       unsigned long flags;
-       bool q_stopped;
 
        while (1) {
-               spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
-               q_stopped = local->queue_stop_reasons[q];
-               spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
-
-               if (q_stopped)
-                       break;
-
                skb = ieee80211_tx_dequeue(&local->hw, queue);
                if (!skb)
                        break;
@@ -347,8 +337,6 @@ static void __ieee80211_wake_txqs(struct ieee80211_sub_if_data *sdata, int ac)
        local_bh_disable();
        spin_lock(&fq->lock);
 
-       sdata->vif.txqs_stopped[ac] = false;
-
        if (!test_bit(SDATA_STATE_RUNNING, &sdata->state))
                goto out;
 
@@ -370,7 +358,7 @@ static void __ieee80211_wake_txqs(struct ieee80211_sub_if_data *sdata, int ac)
                        if (ac != txq->ac)
                                continue;
 
-                       if (!test_and_clear_bit(IEEE80211_TXQ_STOP_NETIF_TX,
+                       if (!test_and_clear_bit(IEEE80211_TXQ_DIRTY,
                                                &txqi->flags))
                                continue;
 
@@ -385,7 +373,7 @@ static void __ieee80211_wake_txqs(struct ieee80211_sub_if_data *sdata, int ac)
 
        txqi = to_txq_info(vif->txq);
 
-       if (!test_and_clear_bit(IEEE80211_TXQ_STOP_NETIF_TX, &txqi->flags) ||
+       if (!test_and_clear_bit(IEEE80211_TXQ_DIRTY, &txqi->flags) ||
            (ps && atomic_read(&ps->num_sta_ps)) || ac != vif->txq->ac)
                goto out;
 
@@ -517,8 +505,6 @@ static void __ieee80211_stop_queue(struct ieee80211_hw *hw, int queue,
                                   bool refcounted)
 {
        struct ieee80211_local *local = hw_to_local(hw);
-       struct ieee80211_sub_if_data *sdata;
-       int n_acs = IEEE80211_NUM_ACS;
 
        trace_stop_queue(local, queue, reason);
 
@@ -530,29 +516,7 @@ static void __ieee80211_stop_queue(struct ieee80211_hw *hw, int queue,
        else
                local->q_stop_reasons[queue][reason]++;
 
-       if (__test_and_set_bit(reason, &local->queue_stop_reasons[queue]))
-               return;
-
-       if (local->hw.queues < IEEE80211_NUM_ACS)
-               n_acs = 1;
-
-       rcu_read_lock();
-       list_for_each_entry_rcu(sdata, &local->interfaces, list) {
-               int ac;
-
-               if (!sdata->dev)
-                       continue;
-
-               for (ac = 0; ac < n_acs; ac++) {
-                       if (sdata->vif.hw_queue[ac] == queue ||
-                           sdata->vif.cab_queue == queue) {
-                               spin_lock(&local->fq.lock);
-                               sdata->vif.txqs_stopped[ac] = true;
-                               spin_unlock(&local->fq.lock);
-                       }
-               }
-       }
-       rcu_read_unlock();
+       set_bit(reason, &local->queue_stop_reasons[queue]);
 }
 
 void ieee80211_stop_queue_by_reason(struct ieee80211_hw *hw, int queue,
index 45e2a48397b959e0a6bb7205c18373bb61334598..70f0ced3ca86e18dd0a63d56bfc2c0ded3b31a33 100644 (file)
@@ -420,6 +420,31 @@ void mptcp_pm_subflow_chk_stale(const struct mptcp_sock *msk, struct sock *ssk)
        }
 }
 
+/* if sk is ipv4 or ipv6_only allows only same-family local and remote addresses,
+ * otherwise allow any matching local/remote pair
+ */
+bool mptcp_pm_addr_families_match(const struct sock *sk,
+                                 const struct mptcp_addr_info *loc,
+                                 const struct mptcp_addr_info *rem)
+{
+       bool mptcp_is_v4 = sk->sk_family == AF_INET;
+
+#if IS_ENABLED(CONFIG_MPTCP_IPV6)
+       bool loc_is_v4 = loc->family == AF_INET || ipv6_addr_v4mapped(&loc->addr6);
+       bool rem_is_v4 = rem->family == AF_INET || ipv6_addr_v4mapped(&rem->addr6);
+
+       if (mptcp_is_v4)
+               return loc_is_v4 && rem_is_v4;
+
+       if (ipv6_only_sock(sk))
+               return !loc_is_v4 && !rem_is_v4;
+
+       return loc_is_v4 == rem_is_v4;
+#else
+       return mptcp_is_v4 && loc->family == AF_INET && rem->family == AF_INET;
+#endif
+}
+
 void mptcp_pm_data_reset(struct mptcp_sock *msk)
 {
        u8 pm_type = mptcp_get_pm_type(sock_net((struct sock *)msk));
index 65dcc55a8ad89f141765f6a4829f3d47bedb2b47..ea6ad9da749303a239f51702905bd11d34d5efed 100644 (file)
@@ -294,6 +294,13 @@ int mptcp_nl_cmd_sf_create(struct sk_buff *skb, struct genl_info *info)
        }
 
        sk = (struct sock *)msk;
+
+       if (!mptcp_pm_addr_families_match(sk, &addr_l, &addr_r)) {
+               GENL_SET_ERR_MSG(info, "families mismatch");
+               err = -EINVAL;
+               goto create_err;
+       }
+
        lock_sock(sk);
 
        err = __mptcp_subflow_connect(sk, &addr_l, &addr_r);
index f6f93957275b800db8702182302d1a8761dfb287..8cd6cc67c2c5cf9c94d9bb041b83058351c30cae 100644 (file)
@@ -98,7 +98,7 @@ static int __mptcp_socket_create(struct mptcp_sock *msk)
        struct socket *ssock;
        int err;
 
-       err = mptcp_subflow_create_socket(sk, &ssock);
+       err = mptcp_subflow_create_socket(sk, sk->sk_family, &ssock);
        if (err)
                return err;
 
@@ -1662,6 +1662,8 @@ static void mptcp_set_nospace(struct sock *sk)
        set_bit(MPTCP_NOSPACE, &mptcp_sk(sk)->flags);
 }
 
+static int mptcp_disconnect(struct sock *sk, int flags);
+
 static int mptcp_sendmsg_fastopen(struct sock *sk, struct sock *ssk, struct msghdr *msg,
                                  size_t len, int *copied_syn)
 {
@@ -1672,9 +1674,9 @@ static int mptcp_sendmsg_fastopen(struct sock *sk, struct sock *ssk, struct msgh
        lock_sock(ssk);
        msg->msg_flags |= MSG_DONTWAIT;
        msk->connect_flags = O_NONBLOCK;
-       msk->is_sendmsg = 1;
+       msk->fastopening = 1;
        ret = tcp_sendmsg_fastopen(ssk, msg, copied_syn, len, NULL);
-       msk->is_sendmsg = 0;
+       msk->fastopening = 0;
        msg->msg_flags = saved_flags;
        release_sock(ssk);
 
@@ -1688,6 +1690,8 @@ static int mptcp_sendmsg_fastopen(struct sock *sk, struct sock *ssk, struct msgh
                 */
                if (ret && ret != -EINPROGRESS && ret != -ERESTARTSYS && ret != -EINTR)
                        *copied_syn = 0;
+       } else if (ret && ret != -EINPROGRESS) {
+               mptcp_disconnect(sk, 0);
        }
 
        return ret;
@@ -2353,7 +2357,7 @@ static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
                /* otherwise tcp will dispose of the ssk and subflow ctx */
                if (ssk->sk_state == TCP_LISTEN) {
                        tcp_set_state(ssk, TCP_CLOSE);
-                       mptcp_subflow_queue_clean(ssk);
+                       mptcp_subflow_queue_clean(sk, ssk);
                        inet_csk_listen_stop(ssk);
                        mptcp_event_pm_listener(ssk, MPTCP_EVENT_LISTENER_CLOSED);
                }
@@ -2989,6 +2993,14 @@ static int mptcp_disconnect(struct sock *sk, int flags)
 {
        struct mptcp_sock *msk = mptcp_sk(sk);
 
+       /* We are on the fastopen error path. We can't call straight into the
+        * subflows cleanup code due to lock nesting (we are already under
+        * msk->firstsocket lock). Do nothing and leave the cleanup to the
+        * caller.
+        */
+       if (msk->fastopening)
+               return 0;
+
        inet_sk_state_store(sk, TCP_CLOSE);
 
        mptcp_stop_timer(sk);
@@ -3532,7 +3544,7 @@ static int mptcp_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
        /* if reaching here via the fastopen/sendmsg path, the caller already
         * acquired the subflow socket lock, too.
         */
-       if (msk->is_sendmsg)
+       if (msk->fastopening)
                err = __inet_stream_connect(ssock, uaddr, addr_len, msk->connect_flags, 1);
        else
                err = inet_stream_connect(ssock, uaddr, addr_len, msk->connect_flags);
index 955fb3d88eb3a8ae32f0aa124c3f89a0e6ea6e4a..601469249da803d1ec622e4ebc6b219dd31a7d06 100644 (file)
@@ -295,7 +295,7 @@ struct mptcp_sock {
        u8              recvmsg_inq:1,
                        cork:1,
                        nodelay:1,
-                       is_sendmsg:1;
+                       fastopening:1;
        int             connect_flags;
        struct work_struct work;
        struct sk_buff  *ooo_last_skb;
@@ -628,7 +628,7 @@ void mptcp_close_ssk(struct sock *sk, struct sock *ssk,
                     struct mptcp_subflow_context *subflow);
 void __mptcp_subflow_send_ack(struct sock *ssk);
 void mptcp_subflow_reset(struct sock *ssk);
-void mptcp_subflow_queue_clean(struct sock *ssk);
+void mptcp_subflow_queue_clean(struct sock *sk, struct sock *ssk);
 void mptcp_sock_graft(struct sock *sk, struct socket *parent);
 struct socket *__mptcp_nmpc_socket(const struct mptcp_sock *msk);
 bool __mptcp_close(struct sock *sk, long timeout);
@@ -641,7 +641,8 @@ bool mptcp_addresses_equal(const struct mptcp_addr_info *a,
 /* called with sk socket lock held */
 int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_addr_info *loc,
                            const struct mptcp_addr_info *remote);
-int mptcp_subflow_create_socket(struct sock *sk, struct socket **new_sock);
+int mptcp_subflow_create_socket(struct sock *sk, unsigned short family,
+                               struct socket **new_sock);
 void mptcp_info2sockaddr(const struct mptcp_addr_info *info,
                         struct sockaddr_storage *addr,
                         unsigned short family);
@@ -776,6 +777,9 @@ int mptcp_pm_parse_addr(struct nlattr *attr, struct genl_info *info,
 int mptcp_pm_parse_entry(struct nlattr *attr, struct genl_info *info,
                         bool require_family,
                         struct mptcp_pm_addr_entry *entry);
+bool mptcp_pm_addr_families_match(const struct sock *sk,
+                                 const struct mptcp_addr_info *loc,
+                                 const struct mptcp_addr_info *rem);
 void mptcp_pm_subflow_chk_stale(const struct mptcp_sock *msk, struct sock *ssk);
 void mptcp_pm_nl_subflow_chk_stale(const struct mptcp_sock *msk, struct sock *ssk);
 void mptcp_pm_new_connection(struct mptcp_sock *msk, const struct sock *ssk, int server_side);
index d1d32a66ae3f73f82f3c6a159a73c89235f6449d..ec54413fb31f7862ccf1602395235a7888e44dd4 100644 (file)
@@ -1547,7 +1547,7 @@ int __mptcp_subflow_connect(struct sock *sk, const struct mptcp_addr_info *loc,
        if (!mptcp_is_fully_established(sk))
                goto err_out;
 
-       err = mptcp_subflow_create_socket(sk, &sf);
+       err = mptcp_subflow_create_socket(sk, loc->family, &sf);
        if (err)
                goto err_out;
 
@@ -1660,7 +1660,9 @@ static void mptcp_subflow_ops_undo_override(struct sock *ssk)
 #endif
                ssk->sk_prot = &tcp_prot;
 }
-int mptcp_subflow_create_socket(struct sock *sk, struct socket **new_sock)
+
+int mptcp_subflow_create_socket(struct sock *sk, unsigned short family,
+                               struct socket **new_sock)
 {
        struct mptcp_subflow_context *subflow;
        struct net *net = sock_net(sk);
@@ -1673,8 +1675,7 @@ int mptcp_subflow_create_socket(struct sock *sk, struct socket **new_sock)
        if (unlikely(!sk->sk_socket))
                return -EINVAL;
 
-       err = sock_create_kern(net, sk->sk_family, SOCK_STREAM, IPPROTO_TCP,
-                              &sf);
+       err = sock_create_kern(net, family, SOCK_STREAM, IPPROTO_TCP, &sf);
        if (err)
                return err;
 
@@ -1791,7 +1792,7 @@ static void subflow_state_change(struct sock *sk)
        }
 }
 
-void mptcp_subflow_queue_clean(struct sock *listener_ssk)
+void mptcp_subflow_queue_clean(struct sock *listener_sk, struct sock *listener_ssk)
 {
        struct request_sock_queue *queue = &inet_csk(listener_ssk)->icsk_accept_queue;
        struct mptcp_sock *msk, *next, *head = NULL;
@@ -1840,8 +1841,23 @@ void mptcp_subflow_queue_clean(struct sock *listener_ssk)
 
                do_cancel_work = __mptcp_close(sk, 0);
                release_sock(sk);
-               if (do_cancel_work)
+               if (do_cancel_work) {
+                       /* lockdep will report a false positive ABBA deadlock
+                        * between cancel_work_sync and the listener socket.
+                        * The involved locks belong to different sockets WRT
+                        * the existing AB chain.
+                        * Using a per socket key is problematic as key
+                        * deregistration requires process context and must be
+                        * performed at socket disposal time, in atomic
+                        * context.
+                        * Just tell lockdep to consider the listener socket
+                        * released here.
+                        */
+                       mutex_release(&listener_sk->sk_lock.dep_map, _RET_IP_);
                        mptcp_cancel_work(sk);
+                       mutex_acquire(&listener_sk->sk_lock.dep_map,
+                                     SINGLE_DEPTH_NESTING, 0, _RET_IP_);
+               }
                sock_put(sk);
        }
 
index a8ce04a4bb72abef52524edad94dc7d69d39458a..e4fa00abde6a2ab9941c8cd7d0b8a12f537503e0 100644 (file)
@@ -308,8 +308,8 @@ bitmap_ip_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
                        return -IPSET_ERR_BITMAP_RANGE;
 
                pr_debug("mask_bits %u, netmask %u\n", mask_bits, netmask);
-               hosts = 2 << (32 - netmask - 1);
-               elements = 2 << (netmask - mask_bits - 1);
+               hosts = 2U << (32 - netmask - 1);
+               elements = 2UL << (netmask - mask_bits - 1);
        }
        if (elements > IPSET_BITMAP_MAX_RANGE + 1)
                return -IPSET_ERR_BITMAP_RANGE_SIZE;
index e7ba5b6dd2b7c1792e76723f68553f8d174e1ae9..46ebee9400dab83ef9a14a55bbfb1d2bd68005d4 100644 (file)
@@ -1698,9 +1698,10 @@ call_ad(struct net *net, struct sock *ctnl, struct sk_buff *skb,
                ret = set->variant->uadt(set, tb, adt, &lineno, flags, retried);
                ip_set_unlock(set);
                retried = true;
-       } while (ret == -EAGAIN &&
-                set->variant->resize &&
-                (ret = set->variant->resize(set, retried)) == 0);
+       } while (ret == -ERANGE ||
+                (ret == -EAGAIN &&
+                 set->variant->resize &&
+                 (ret = set->variant->resize(set, retried)) == 0));
 
        if (!ret || (ret == -IPSET_ERR_EXIST && eexist))
                return 0;
index e30513cefd90e729394c3f5e235462b7b681ff41..c9f4e38596632338695e783cc7fb70ab12cd94d6 100644 (file)
@@ -100,11 +100,11 @@ static int
 hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[],
              enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
 {
-       const struct hash_ip4 *h = set->data;
+       struct hash_ip4 *h = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct hash_ip4_elem e = { 0 };
        struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
-       u32 ip = 0, ip_to = 0, hosts;
+       u32 ip = 0, ip_to = 0, hosts, i = 0;
        int ret = 0;
 
        if (tb[IPSET_ATTR_LINENO])
@@ -149,14 +149,14 @@ hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[],
 
        hosts = h->netmask == 32 ? 1 : 2 << (32 - h->netmask - 1);
 
-       /* 64bit division is not allowed on 32bit */
-       if (((u64)ip_to - ip + 1) >> (32 - h->netmask) > IPSET_MAX_RANGE)
-               return -ERANGE;
-
        if (retried)
                ip = ntohl(h->next.ip);
-       for (; ip <= ip_to;) {
+       for (; ip <= ip_to; i++) {
                e.ip = htonl(ip);
+               if (i > IPSET_MAX_RANGE) {
+                       hash_ip4_data_next(&h->next, &e);
+                       return -ERANGE;
+               }
                ret = adtfn(set, &e, &ext, &ext, flags);
                if (ret && !ip_set_eexist(ret, flags))
                        return ret;
index 153de3457423e58601f309bb797da33cc1a40229..a22ec1a6f6ec85b1c7c3cf04a824e02a1c74057c 100644 (file)
@@ -97,11 +97,11 @@ static int
 hash_ipmark4_uadt(struct ip_set *set, struct nlattr *tb[],
                  enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
 {
-       const struct hash_ipmark4 *h = set->data;
+       struct hash_ipmark4 *h = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct hash_ipmark4_elem e = { };
        struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
-       u32 ip, ip_to = 0;
+       u32 ip, ip_to = 0, i = 0;
        int ret;
 
        if (tb[IPSET_ATTR_LINENO])
@@ -148,13 +148,14 @@ hash_ipmark4_uadt(struct ip_set *set, struct nlattr *tb[],
                ip_set_mask_from_to(ip, ip_to, cidr);
        }
 
-       if (((u64)ip_to - ip + 1) > IPSET_MAX_RANGE)
-               return -ERANGE;
-
        if (retried)
                ip = ntohl(h->next.ip);
-       for (; ip <= ip_to; ip++) {
+       for (; ip <= ip_to; ip++, i++) {
                e.ip = htonl(ip);
+               if (i > IPSET_MAX_RANGE) {
+                       hash_ipmark4_data_next(&h->next, &e);
+                       return -ERANGE;
+               }
                ret = adtfn(set, &e, &ext, &ext, flags);
 
                if (ret && !ip_set_eexist(ret, flags))
index 2ffbd0b78a8c529495fa334bf17223808cad9692..e977b5a9c48dcb23c8d89f78f7e6c0ae2b3e266a 100644 (file)
@@ -112,11 +112,11 @@ static int
 hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[],
                  enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
 {
-       const struct hash_ipport4 *h = set->data;
+       struct hash_ipport4 *h = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct hash_ipport4_elem e = { .ip = 0 };
        struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
-       u32 ip, ip_to = 0, p = 0, port, port_to;
+       u32 ip, ip_to = 0, p = 0, port, port_to, i = 0;
        bool with_ports = false;
        int ret;
 
@@ -184,17 +184,18 @@ hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[],
                        swap(port, port_to);
        }
 
-       if (((u64)ip_to - ip + 1)*(port_to - port + 1) > IPSET_MAX_RANGE)
-               return -ERANGE;
-
        if (retried)
                ip = ntohl(h->next.ip);
        for (; ip <= ip_to; ip++) {
                p = retried && ip == ntohl(h->next.ip) ? ntohs(h->next.port)
                                                       : port;
-               for (; p <= port_to; p++) {
+               for (; p <= port_to; p++, i++) {
                        e.ip = htonl(ip);
                        e.port = htons(p);
+                       if (i > IPSET_MAX_RANGE) {
+                               hash_ipport4_data_next(&h->next, &e);
+                               return -ERANGE;
+                       }
                        ret = adtfn(set, &e, &ext, &ext, flags);
 
                        if (ret && !ip_set_eexist(ret, flags))
index 334fb1ad0e86cfca13fa5c3e69b5ae84ffc0fc90..39a01934b1536d800b14a6cddc4175cf64126ee7 100644 (file)
@@ -108,11 +108,11 @@ static int
 hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[],
                    enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
 {
-       const struct hash_ipportip4 *h = set->data;
+       struct hash_ipportip4 *h = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct hash_ipportip4_elem e = { .ip = 0 };
        struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
-       u32 ip, ip_to = 0, p = 0, port, port_to;
+       u32 ip, ip_to = 0, p = 0, port, port_to, i = 0;
        bool with_ports = false;
        int ret;
 
@@ -180,17 +180,18 @@ hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[],
                        swap(port, port_to);
        }
 
-       if (((u64)ip_to - ip + 1)*(port_to - port + 1) > IPSET_MAX_RANGE)
-               return -ERANGE;
-
        if (retried)
                ip = ntohl(h->next.ip);
        for (; ip <= ip_to; ip++) {
                p = retried && ip == ntohl(h->next.ip) ? ntohs(h->next.port)
                                                       : port;
-               for (; p <= port_to; p++) {
+               for (; p <= port_to; p++, i++) {
                        e.ip = htonl(ip);
                        e.port = htons(p);
+                       if (i > IPSET_MAX_RANGE) {
+                               hash_ipportip4_data_next(&h->next, &e);
+                               return -ERANGE;
+                       }
                        ret = adtfn(set, &e, &ext, &ext, flags);
 
                        if (ret && !ip_set_eexist(ret, flags))
index 7df94f437f600f98e48a62dcfb3078b8b01da6e2..5c6de605a9fb7fd13db6b3497685d39d47a25911 100644 (file)
@@ -160,12 +160,12 @@ static int
 hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
                     enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
 {
-       const struct hash_ipportnet4 *h = set->data;
+       struct hash_ipportnet4 *h = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct hash_ipportnet4_elem e = { .cidr = HOST_MASK - 1 };
        struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
        u32 ip = 0, ip_to = 0, p = 0, port, port_to;
-       u32 ip2_from = 0, ip2_to = 0, ip2;
+       u32 ip2_from = 0, ip2_to = 0, ip2, i = 0;
        bool with_ports = false;
        u8 cidr;
        int ret;
@@ -253,9 +253,6 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
                        swap(port, port_to);
        }
 
-       if (((u64)ip_to - ip + 1)*(port_to - port + 1) > IPSET_MAX_RANGE)
-               return -ERANGE;
-
        ip2_to = ip2_from;
        if (tb[IPSET_ATTR_IP2_TO]) {
                ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP2_TO], &ip2_to);
@@ -282,9 +279,15 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
                for (; p <= port_to; p++) {
                        e.port = htons(p);
                        do {
+                               i++;
                                e.ip2 = htonl(ip2);
                                ip2 = ip_set_range_to_cidr(ip2, ip2_to, &cidr);
                                e.cidr = cidr - 1;
+                               if (i > IPSET_MAX_RANGE) {
+                                       hash_ipportnet4_data_next(&h->next,
+                                                                 &e);
+                                       return -ERANGE;
+                               }
                                ret = adtfn(set, &e, &ext, &ext, flags);
 
                                if (ret && !ip_set_eexist(ret, flags))
index 1422739d9aa255fb0b7620b7057eabc137b00d84..ce0a9ce5a91f1a08ec8ceb666cee3979fc5bddc3 100644 (file)
@@ -136,11 +136,11 @@ static int
 hash_net4_uadt(struct ip_set *set, struct nlattr *tb[],
               enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
 {
-       const struct hash_net4 *h = set->data;
+       struct hash_net4 *h = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct hash_net4_elem e = { .cidr = HOST_MASK };
        struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
-       u32 ip = 0, ip_to = 0, ipn, n = 0;
+       u32 ip = 0, ip_to = 0, i = 0;
        int ret;
 
        if (tb[IPSET_ATTR_LINENO])
@@ -188,19 +188,16 @@ hash_net4_uadt(struct ip_set *set, struct nlattr *tb[],
                if (ip + UINT_MAX == ip_to)
                        return -IPSET_ERR_HASH_RANGE;
        }
-       ipn = ip;
-       do {
-               ipn = ip_set_range_to_cidr(ipn, ip_to, &e.cidr);
-               n++;
-       } while (ipn++ < ip_to);
-
-       if (n > IPSET_MAX_RANGE)
-               return -ERANGE;
 
        if (retried)
                ip = ntohl(h->next.ip);
        do {
+               i++;
                e.ip = htonl(ip);
+               if (i > IPSET_MAX_RANGE) {
+                       hash_net4_data_next(&h->next, &e);
+                       return -ERANGE;
+               }
                ip = ip_set_range_to_cidr(ip, ip_to, &e.cidr);
                ret = adtfn(set, &e, &ext, &ext, flags);
                if (ret && !ip_set_eexist(ret, flags))
index 9810f5bf63f5e42d5715d0419049c0c097f2e587..0310732862362c68a805fa4d7648b616c9ffc102 100644 (file)
@@ -202,7 +202,7 @@ hash_netiface4_uadt(struct ip_set *set, struct nlattr *tb[],
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct hash_netiface4_elem e = { .cidr = HOST_MASK, .elem = 1 };
        struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
-       u32 ip = 0, ip_to = 0, ipn, n = 0;
+       u32 ip = 0, ip_to = 0, i = 0;
        int ret;
 
        if (tb[IPSET_ATTR_LINENO])
@@ -256,19 +256,16 @@ hash_netiface4_uadt(struct ip_set *set, struct nlattr *tb[],
        } else {
                ip_set_mask_from_to(ip, ip_to, e.cidr);
        }
-       ipn = ip;
-       do {
-               ipn = ip_set_range_to_cidr(ipn, ip_to, &e.cidr);
-               n++;
-       } while (ipn++ < ip_to);
-
-       if (n > IPSET_MAX_RANGE)
-               return -ERANGE;
 
        if (retried)
                ip = ntohl(h->next.ip);
        do {
+               i++;
                e.ip = htonl(ip);
+               if (i > IPSET_MAX_RANGE) {
+                       hash_netiface4_data_next(&h->next, &e);
+                       return -ERANGE;
+               }
                ip = ip_set_range_to_cidr(ip, ip_to, &e.cidr);
                ret = adtfn(set, &e, &ext, &ext, flags);
 
index cdfb78c6e0d3de56488cebc5a34df470a7e5ed36..8fbe649c9dd3d48691655e2155f94da8e0f16b2a 100644 (file)
@@ -166,13 +166,12 @@ static int
 hash_netnet4_uadt(struct ip_set *set, struct nlattr *tb[],
                  enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
 {
-       const struct hash_netnet4 *h = set->data;
+       struct hash_netnet4 *h = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct hash_netnet4_elem e = { };
        struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
        u32 ip = 0, ip_to = 0;
-       u32 ip2 = 0, ip2_from = 0, ip2_to = 0, ipn;
-       u64 n = 0, m = 0;
+       u32 ip2 = 0, ip2_from = 0, ip2_to = 0, i = 0;
        int ret;
 
        if (tb[IPSET_ATTR_LINENO])
@@ -248,19 +247,6 @@ hash_netnet4_uadt(struct ip_set *set, struct nlattr *tb[],
        } else {
                ip_set_mask_from_to(ip2_from, ip2_to, e.cidr[1]);
        }
-       ipn = ip;
-       do {
-               ipn = ip_set_range_to_cidr(ipn, ip_to, &e.cidr[0]);
-               n++;
-       } while (ipn++ < ip_to);
-       ipn = ip2_from;
-       do {
-               ipn = ip_set_range_to_cidr(ipn, ip2_to, &e.cidr[1]);
-               m++;
-       } while (ipn++ < ip2_to);
-
-       if (n*m > IPSET_MAX_RANGE)
-               return -ERANGE;
 
        if (retried) {
                ip = ntohl(h->next.ip[0]);
@@ -273,7 +259,12 @@ hash_netnet4_uadt(struct ip_set *set, struct nlattr *tb[],
                e.ip[0] = htonl(ip);
                ip = ip_set_range_to_cidr(ip, ip_to, &e.cidr[0]);
                do {
+                       i++;
                        e.ip[1] = htonl(ip2);
+                       if (i > IPSET_MAX_RANGE) {
+                               hash_netnet4_data_next(&h->next, &e);
+                               return -ERANGE;
+                       }
                        ip2 = ip_set_range_to_cidr(ip2, ip2_to, &e.cidr[1]);
                        ret = adtfn(set, &e, &ext, &ext, flags);
                        if (ret && !ip_set_eexist(ret, flags))
index 09cf72eb37f8d2549b3527530f1e4ca9e7d10efa..d1a0628df4ef3a7301a8e972874106dc135176fe 100644 (file)
@@ -154,12 +154,11 @@ static int
 hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[],
                   enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
 {
-       const struct hash_netport4 *h = set->data;
+       struct hash_netport4 *h = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct hash_netport4_elem e = { .cidr = HOST_MASK - 1 };
        struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
-       u32 port, port_to, p = 0, ip = 0, ip_to = 0, ipn;
-       u64 n = 0;
+       u32 port, port_to, p = 0, ip = 0, ip_to = 0, i = 0;
        bool with_ports = false;
        u8 cidr;
        int ret;
@@ -236,14 +235,6 @@ hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[],
        } else {
                ip_set_mask_from_to(ip, ip_to, e.cidr + 1);
        }
-       ipn = ip;
-       do {
-               ipn = ip_set_range_to_cidr(ipn, ip_to, &cidr);
-               n++;
-       } while (ipn++ < ip_to);
-
-       if (n*(port_to - port + 1) > IPSET_MAX_RANGE)
-               return -ERANGE;
 
        if (retried) {
                ip = ntohl(h->next.ip);
@@ -255,8 +246,12 @@ hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[],
                e.ip = htonl(ip);
                ip = ip_set_range_to_cidr(ip, ip_to, &cidr);
                e.cidr = cidr - 1;
-               for (; p <= port_to; p++) {
+               for (; p <= port_to; p++, i++) {
                        e.port = htons(p);
+                       if (i > IPSET_MAX_RANGE) {
+                               hash_netport4_data_next(&h->next, &e);
+                               return -ERANGE;
+                       }
                        ret = adtfn(set, &e, &ext, &ext, flags);
                        if (ret && !ip_set_eexist(ret, flags))
                                return ret;
index 19bcdb3141f6e6f17d061477c8517f0f94ef0a55..005a7ce87217e24c292e27eddb03f1320fed4a6a 100644 (file)
@@ -173,17 +173,26 @@ hash_netportnet4_kadt(struct ip_set *set, const struct sk_buff *skb,
        return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
 }
 
+static u32
+hash_netportnet4_range_to_cidr(u32 from, u32 to, u8 *cidr)
+{
+       if (from == 0 && to == UINT_MAX) {
+               *cidr = 0;
+               return to;
+       }
+       return ip_set_range_to_cidr(from, to, cidr);
+}
+
 static int
 hash_netportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
                      enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
 {
-       const struct hash_netportnet4 *h = set->data;
+       struct hash_netportnet4 *h = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct hash_netportnet4_elem e = { };
        struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
        u32 ip = 0, ip_to = 0, p = 0, port, port_to;
-       u32 ip2_from = 0, ip2_to = 0, ip2, ipn;
-       u64 n = 0, m = 0;
+       u32 ip2_from = 0, ip2_to = 0, ip2, i = 0;
        bool with_ports = false;
        int ret;
 
@@ -285,19 +294,6 @@ hash_netportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
        } else {
                ip_set_mask_from_to(ip2_from, ip2_to, e.cidr[1]);
        }
-       ipn = ip;
-       do {
-               ipn = ip_set_range_to_cidr(ipn, ip_to, &e.cidr[0]);
-               n++;
-       } while (ipn++ < ip_to);
-       ipn = ip2_from;
-       do {
-               ipn = ip_set_range_to_cidr(ipn, ip2_to, &e.cidr[1]);
-               m++;
-       } while (ipn++ < ip2_to);
-
-       if (n*m*(port_to - port + 1) > IPSET_MAX_RANGE)
-               return -ERANGE;
 
        if (retried) {
                ip = ntohl(h->next.ip[0]);
@@ -310,13 +306,19 @@ hash_netportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
 
        do {
                e.ip[0] = htonl(ip);
-               ip = ip_set_range_to_cidr(ip, ip_to, &e.cidr[0]);
+               ip = hash_netportnet4_range_to_cidr(ip, ip_to, &e.cidr[0]);
                for (; p <= port_to; p++) {
                        e.port = htons(p);
                        do {
+                               i++;
                                e.ip[1] = htonl(ip2);
-                               ip2 = ip_set_range_to_cidr(ip2, ip2_to,
-                                                          &e.cidr[1]);
+                               if (i > IPSET_MAX_RANGE) {
+                                       hash_netportnet4_data_next(&h->next,
+                                                                  &e);
+                                       return -ERANGE;
+                               }
+                               ip2 = hash_netportnet4_range_to_cidr(ip2,
+                                                       ip2_to, &e.cidr[1]);
                                ret = adtfn(set, &e, &ext, &ext, flags);
                                if (ret && !ip_set_eexist(ret, flags))
                                        return ret;
index 99323fb12d0f5196e0f00fdecea540c53c420b1e..ccef340be575ec96a5db8970a31e42022bebdb45 100644 (file)
@@ -141,6 +141,7 @@ unsigned int nf_confirm(void *priv,
        struct nf_conn *ct;
        bool seqadj_needed;
        __be16 frag_off;
+       int start;
        u8 pnum;
 
        ct = nf_ct_get(skb, &ctinfo);
@@ -163,9 +164,11 @@ unsigned int nf_confirm(void *priv,
                break;
        case NFPROTO_IPV6:
                pnum = ipv6_hdr(skb)->nexthdr;
-               protoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &pnum, &frag_off);
-               if (protoff < 0 || (frag_off & htons(~0x7)) != 0)
+               start = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &pnum, &frag_off);
+               if (start < 0 || (frag_off & htons(~0x7)) != 0)
                        return nf_conntrack_confirm(skb);
+
+               protoff = start;
                break;
        default:
                return nf_conntrack_confirm(skb);
index 6566310831779bbac55317361adfaf2f88b143d0..3ac1af6f59fccaad5f1cd0053f038e7ed6acadc4 100644 (file)
@@ -1068,6 +1068,13 @@ int nf_conntrack_tcp_packet(struct nf_conn *ct,
                                ct->proto.tcp.last_flags |=
                                        IP_CT_EXP_CHALLENGE_ACK;
                }
+
+               /* possible challenge ack reply to syn */
+               if (old_state == TCP_CONNTRACK_SYN_SENT &&
+                   index == TCP_ACK_SET &&
+                   dir == IP_CT_DIR_REPLY)
+                       ct->proto.tcp.last_ack = ntohl(th->ack_seq);
+
                spin_unlock_bh(&ct->lock);
                nf_ct_l4proto_log_invalid(skb, ct, state,
                                          "packet (index %d) in dir %d ignored, state %s",
@@ -1193,6 +1200,14 @@ int nf_conntrack_tcp_packet(struct nf_conn *ct,
                         * segments we ignored. */
                        goto in_window;
                }
+
+               /* Reset in response to a challenge-ack we let through earlier */
+               if (old_state == TCP_CONNTRACK_SYN_SENT &&
+                   ct->proto.tcp.last_index == TCP_ACK_SET &&
+                   ct->proto.tcp.last_dir == IP_CT_DIR_REPLY &&
+                   ntohl(th->seq) == ct->proto.tcp.last_ack)
+                       goto in_window;
+
                break;
        default:
                /* Keep compilers happy. */
index 832b881f7c1749dba7759123695368c48066821f..8c09e4d12ac1ee05127581b3bc016c6c02742a93 100644 (file)
@@ -465,8 +465,9 @@ static int nft_delrule_by_chain(struct nft_ctx *ctx)
        return 0;
 }
 
-static int nft_trans_set_add(const struct nft_ctx *ctx, int msg_type,
-                            struct nft_set *set)
+static int __nft_trans_set_add(const struct nft_ctx *ctx, int msg_type,
+                              struct nft_set *set,
+                              const struct nft_set_desc *desc)
 {
        struct nft_trans *trans;
 
@@ -474,17 +475,28 @@ static int nft_trans_set_add(const struct nft_ctx *ctx, int msg_type,
        if (trans == NULL)
                return -ENOMEM;
 
-       if (msg_type == NFT_MSG_NEWSET && ctx->nla[NFTA_SET_ID] != NULL) {
+       if (msg_type == NFT_MSG_NEWSET && ctx->nla[NFTA_SET_ID] && !desc) {
                nft_trans_set_id(trans) =
                        ntohl(nla_get_be32(ctx->nla[NFTA_SET_ID]));
                nft_activate_next(ctx->net, set);
        }
        nft_trans_set(trans) = set;
+       if (desc) {
+               nft_trans_set_update(trans) = true;
+               nft_trans_set_gc_int(trans) = desc->gc_int;
+               nft_trans_set_timeout(trans) = desc->timeout;
+       }
        nft_trans_commit_list_add_tail(ctx->net, trans);
 
        return 0;
 }
 
+static int nft_trans_set_add(const struct nft_ctx *ctx, int msg_type,
+                            struct nft_set *set)
+{
+       return __nft_trans_set_add(ctx, msg_type, set, NULL);
+}
+
 static int nft_delset(const struct nft_ctx *ctx, struct nft_set *set)
 {
        int err;
@@ -3780,8 +3792,7 @@ static bool nft_set_ops_candidate(const struct nft_set_type *type, u32 flags)
 static const struct nft_set_ops *
 nft_select_set_ops(const struct nft_ctx *ctx,
                   const struct nlattr * const nla[],
-                  const struct nft_set_desc *desc,
-                  enum nft_set_policies policy)
+                  const struct nft_set_desc *desc)
 {
        struct nftables_pernet *nft_net = nft_pernet(ctx->net);
        const struct nft_set_ops *ops, *bops;
@@ -3810,7 +3821,7 @@ nft_select_set_ops(const struct nft_ctx *ctx,
                if (!ops->estimate(desc, flags, &est))
                        continue;
 
-               switch (policy) {
+               switch (desc->policy) {
                case NFT_SET_POL_PERFORMANCE:
                        if (est.lookup < best.lookup)
                                break;
@@ -4045,8 +4056,10 @@ static int nf_tables_fill_set_concat(struct sk_buff *skb,
 static int nf_tables_fill_set(struct sk_buff *skb, const struct nft_ctx *ctx,
                              const struct nft_set *set, u16 event, u16 flags)
 {
-       struct nlmsghdr *nlh;
+       u64 timeout = READ_ONCE(set->timeout);
+       u32 gc_int = READ_ONCE(set->gc_int);
        u32 portid = ctx->portid;
+       struct nlmsghdr *nlh;
        struct nlattr *nest;
        u32 seq = ctx->seq;
        int i;
@@ -4082,13 +4095,13 @@ static int nf_tables_fill_set(struct sk_buff *skb, const struct nft_ctx *ctx,
            nla_put_be32(skb, NFTA_SET_OBJ_TYPE, htonl(set->objtype)))
                goto nla_put_failure;
 
-       if (set->timeout &&
+       if (timeout &&
            nla_put_be64(skb, NFTA_SET_TIMEOUT,
-                        nf_jiffies64_to_msecs(set->timeout),
+                        nf_jiffies64_to_msecs(timeout),
                         NFTA_SET_PAD))
                goto nla_put_failure;
-       if (set->gc_int &&
-           nla_put_be32(skb, NFTA_SET_GC_INTERVAL, htonl(set->gc_int)))
+       if (gc_int &&
+           nla_put_be32(skb, NFTA_SET_GC_INTERVAL, htonl(gc_int)))
                goto nla_put_failure;
 
        if (set->policy != NFT_SET_POL_PERFORMANCE) {
@@ -4389,15 +4402,94 @@ static int nf_tables_set_desc_parse(struct nft_set_desc *desc,
        return err;
 }
 
+static int nft_set_expr_alloc(struct nft_ctx *ctx, struct nft_set *set,
+                             const struct nlattr * const *nla,
+                             struct nft_expr **exprs, int *num_exprs,
+                             u32 flags)
+{
+       struct nft_expr *expr;
+       int err, i;
+
+       if (nla[NFTA_SET_EXPR]) {
+               expr = nft_set_elem_expr_alloc(ctx, set, nla[NFTA_SET_EXPR]);
+               if (IS_ERR(expr)) {
+                       err = PTR_ERR(expr);
+                       goto err_set_expr_alloc;
+               }
+               exprs[0] = expr;
+               (*num_exprs)++;
+       } else if (nla[NFTA_SET_EXPRESSIONS]) {
+               struct nlattr *tmp;
+               int left;
+
+               if (!(flags & NFT_SET_EXPR)) {
+                       err = -EINVAL;
+                       goto err_set_expr_alloc;
+               }
+               i = 0;
+               nla_for_each_nested(tmp, nla[NFTA_SET_EXPRESSIONS], left) {
+                       if (i == NFT_SET_EXPR_MAX) {
+                               err = -E2BIG;
+                               goto err_set_expr_alloc;
+                       }
+                       if (nla_type(tmp) != NFTA_LIST_ELEM) {
+                               err = -EINVAL;
+                               goto err_set_expr_alloc;
+                       }
+                       expr = nft_set_elem_expr_alloc(ctx, set, tmp);
+                       if (IS_ERR(expr)) {
+                               err = PTR_ERR(expr);
+                               goto err_set_expr_alloc;
+                       }
+                       exprs[i++] = expr;
+                       (*num_exprs)++;
+               }
+       }
+
+       return 0;
+
+err_set_expr_alloc:
+       for (i = 0; i < *num_exprs; i++)
+               nft_expr_destroy(ctx, exprs[i]);
+
+       return err;
+}
+
+static bool nft_set_is_same(const struct nft_set *set,
+                           const struct nft_set_desc *desc,
+                           struct nft_expr *exprs[], u32 num_exprs, u32 flags)
+{
+       int i;
+
+       if (set->ktype != desc->ktype ||
+           set->dtype != desc->dtype ||
+           set->flags != flags ||
+           set->klen != desc->klen ||
+           set->dlen != desc->dlen ||
+           set->field_count != desc->field_count ||
+           set->num_exprs != num_exprs)
+               return false;
+
+       for (i = 0; i < desc->field_count; i++) {
+               if (set->field_len[i] != desc->field_len[i])
+                       return false;
+       }
+
+       for (i = 0; i < num_exprs; i++) {
+               if (set->exprs[i]->ops != exprs[i]->ops)
+                       return false;
+       }
+
+       return true;
+}
+
 static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info,
                            const struct nlattr * const nla[])
 {
-       u32 ktype, dtype, flags, policy, gc_int, objtype;
        struct netlink_ext_ack *extack = info->extack;
        u8 genmask = nft_genmask_next(info->net);
        u8 family = info->nfmsg->nfgen_family;
        const struct nft_set_ops *ops;
-       struct nft_expr *expr = NULL;
        struct net *net = info->net;
        struct nft_set_desc desc;
        struct nft_table *table;
@@ -4405,10 +4497,11 @@ static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info,
        struct nft_set *set;
        struct nft_ctx ctx;
        size_t alloc_size;
-       u64 timeout;
+       int num_exprs = 0;
        char *name;
        int err, i;
        u16 udlen;
+       u32 flags;
        u64 size;
 
        if (nla[NFTA_SET_TABLE] == NULL ||
@@ -4419,10 +4512,10 @@ static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info,
 
        memset(&desc, 0, sizeof(desc));
 
-       ktype = NFT_DATA_VALUE;
+       desc.ktype = NFT_DATA_VALUE;
        if (nla[NFTA_SET_KEY_TYPE] != NULL) {
-               ktype = ntohl(nla_get_be32(nla[NFTA_SET_KEY_TYPE]));
-               if ((ktype & NFT_DATA_RESERVED_MASK) == NFT_DATA_RESERVED_MASK)
+               desc.ktype = ntohl(nla_get_be32(nla[NFTA_SET_KEY_TYPE]));
+               if ((desc.ktype & NFT_DATA_RESERVED_MASK) == NFT_DATA_RESERVED_MASK)
                        return -EINVAL;
        }
 
@@ -4447,17 +4540,17 @@ static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info,
                        return -EOPNOTSUPP;
        }
 
-       dtype = 0;
+       desc.dtype = 0;
        if (nla[NFTA_SET_DATA_TYPE] != NULL) {
                if (!(flags & NFT_SET_MAP))
                        return -EINVAL;
 
-               dtype = ntohl(nla_get_be32(nla[NFTA_SET_DATA_TYPE]));
-               if ((dtype & NFT_DATA_RESERVED_MASK) == NFT_DATA_RESERVED_MASK &&
-                   dtype != NFT_DATA_VERDICT)
+               desc.dtype = ntohl(nla_get_be32(nla[NFTA_SET_DATA_TYPE]));
+               if ((desc.dtype & NFT_DATA_RESERVED_MASK) == NFT_DATA_RESERVED_MASK &&
+                   desc.dtype != NFT_DATA_VERDICT)
                        return -EINVAL;
 
-               if (dtype != NFT_DATA_VERDICT) {
+               if (desc.dtype != NFT_DATA_VERDICT) {
                        if (nla[NFTA_SET_DATA_LEN] == NULL)
                                return -EINVAL;
                        desc.dlen = ntohl(nla_get_be32(nla[NFTA_SET_DATA_LEN]));
@@ -4472,34 +4565,34 @@ static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info,
                if (!(flags & NFT_SET_OBJECT))
                        return -EINVAL;
 
-               objtype = ntohl(nla_get_be32(nla[NFTA_SET_OBJ_TYPE]));
-               if (objtype == NFT_OBJECT_UNSPEC ||
-                   objtype > NFT_OBJECT_MAX)
+               desc.objtype = ntohl(nla_get_be32(nla[NFTA_SET_OBJ_TYPE]));
+               if (desc.objtype == NFT_OBJECT_UNSPEC ||
+                   desc.objtype > NFT_OBJECT_MAX)
                        return -EOPNOTSUPP;
        } else if (flags & NFT_SET_OBJECT)
                return -EINVAL;
        else
-               objtype = NFT_OBJECT_UNSPEC;
+               desc.objtype = NFT_OBJECT_UNSPEC;
 
-       timeout = 0;
+       desc.timeout = 0;
        if (nla[NFTA_SET_TIMEOUT] != NULL) {
                if (!(flags & NFT_SET_TIMEOUT))
                        return -EINVAL;
 
-               err = nf_msecs_to_jiffies64(nla[NFTA_SET_TIMEOUT], &timeout);
+               err = nf_msecs_to_jiffies64(nla[NFTA_SET_TIMEOUT], &desc.timeout);
                if (err)
                        return err;
        }
-       gc_int = 0;
+       desc.gc_int = 0;
        if (nla[NFTA_SET_GC_INTERVAL] != NULL) {
                if (!(flags & NFT_SET_TIMEOUT))
                        return -EINVAL;
-               gc_int = ntohl(nla_get_be32(nla[NFTA_SET_GC_INTERVAL]));
+               desc.gc_int = ntohl(nla_get_be32(nla[NFTA_SET_GC_INTERVAL]));
        }
 
-       policy = NFT_SET_POL_PERFORMANCE;
+       desc.policy = NFT_SET_POL_PERFORMANCE;
        if (nla[NFTA_SET_POLICY] != NULL)
-               policy = ntohl(nla_get_be32(nla[NFTA_SET_POLICY]));
+               desc.policy = ntohl(nla_get_be32(nla[NFTA_SET_POLICY]));
 
        if (nla[NFTA_SET_DESC] != NULL) {
                err = nf_tables_set_desc_parse(&desc, nla[NFTA_SET_DESC]);
@@ -4531,6 +4624,8 @@ static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info,
                        return PTR_ERR(set);
                }
        } else {
+               struct nft_expr *exprs[NFT_SET_EXPR_MAX] = {};
+
                if (info->nlh->nlmsg_flags & NLM_F_EXCL) {
                        NL_SET_BAD_ATTR(extack, nla[NFTA_SET_NAME]);
                        return -EEXIST;
@@ -4538,13 +4633,29 @@ static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info,
                if (info->nlh->nlmsg_flags & NLM_F_REPLACE)
                        return -EOPNOTSUPP;
 
-               return 0;
+               err = nft_set_expr_alloc(&ctx, set, nla, exprs, &num_exprs, flags);
+               if (err < 0)
+                       return err;
+
+               err = 0;
+               if (!nft_set_is_same(set, &desc, exprs, num_exprs, flags)) {
+                       NL_SET_BAD_ATTR(extack, nla[NFTA_SET_NAME]);
+                       err = -EEXIST;
+               }
+
+               for (i = 0; i < num_exprs; i++)
+                       nft_expr_destroy(&ctx, exprs[i]);
+
+               if (err < 0)
+                       return err;
+
+               return __nft_trans_set_add(&ctx, NFT_MSG_NEWSET, set, &desc);
        }
 
        if (!(info->nlh->nlmsg_flags & NLM_F_CREATE))
                return -ENOENT;
 
-       ops = nft_select_set_ops(&ctx, nla, &desc, policy);
+       ops = nft_select_set_ops(&ctx, nla, &desc);
        if (IS_ERR(ops))
                return PTR_ERR(ops);
 
@@ -4584,18 +4695,18 @@ static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info,
        set->table = table;
        write_pnet(&set->net, net);
        set->ops = ops;
-       set->ktype = ktype;
+       set->ktype = desc.ktype;
        set->klen = desc.klen;
-       set->dtype = dtype;
-       set->objtype = objtype;
+       set->dtype = desc.dtype;
+       set->objtype = desc.objtype;
        set->dlen = desc.dlen;
        set->flags = flags;
        set->size = desc.size;
-       set->policy = policy;
+       set->policy = desc.policy;
        set->udlen = udlen;
        set->udata = udata;
-       set->timeout = timeout;
-       set->gc_int = gc_int;
+       set->timeout = desc.timeout;
+       set->gc_int = desc.gc_int;
 
        set->field_count = desc.field_count;
        for (i = 0; i < desc.field_count; i++)
@@ -4605,43 +4716,11 @@ static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info,
        if (err < 0)
                goto err_set_init;
 
-       if (nla[NFTA_SET_EXPR]) {
-               expr = nft_set_elem_expr_alloc(&ctx, set, nla[NFTA_SET_EXPR]);
-               if (IS_ERR(expr)) {
-                       err = PTR_ERR(expr);
-                       goto err_set_expr_alloc;
-               }
-               set->exprs[0] = expr;
-               set->num_exprs++;
-       } else if (nla[NFTA_SET_EXPRESSIONS]) {
-               struct nft_expr *expr;
-               struct nlattr *tmp;
-               int left;
-
-               if (!(flags & NFT_SET_EXPR)) {
-                       err = -EINVAL;
-                       goto err_set_expr_alloc;
-               }
-               i = 0;
-               nla_for_each_nested(tmp, nla[NFTA_SET_EXPRESSIONS], left) {
-                       if (i == NFT_SET_EXPR_MAX) {
-                               err = -E2BIG;
-                               goto err_set_expr_alloc;
-                       }
-                       if (nla_type(tmp) != NFTA_LIST_ELEM) {
-                               err = -EINVAL;
-                               goto err_set_expr_alloc;
-                       }
-                       expr = nft_set_elem_expr_alloc(&ctx, set, tmp);
-                       if (IS_ERR(expr)) {
-                               err = PTR_ERR(expr);
-                               goto err_set_expr_alloc;
-                       }
-                       set->exprs[i++] = expr;
-                       set->num_exprs++;
-               }
-       }
+       err = nft_set_expr_alloc(&ctx, set, nla, set->exprs, &num_exprs, flags);
+       if (err < 0)
+               goto err_set_destroy;
 
+       set->num_exprs = num_exprs;
        set->handle = nf_tables_alloc_handle(table);
 
        err = nft_trans_set_add(&ctx, NFT_MSG_NEWSET, set);
@@ -4655,7 +4734,7 @@ static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info,
 err_set_expr_alloc:
        for (i = 0; i < set->num_exprs; i++)
                nft_expr_destroy(&ctx, set->exprs[i]);
-
+err_set_destroy:
        ops->destroy(set);
 err_set_init:
        kfree(set->name);
@@ -6008,7 +6087,7 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
                        return err;
        } else if (set->flags & NFT_SET_TIMEOUT &&
                   !(flags & NFT_SET_ELEM_INTERVAL_END)) {
-               timeout = set->timeout;
+               timeout = READ_ONCE(set->timeout);
        }
 
        expiration = 0;
@@ -6109,7 +6188,7 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
                if (err < 0)
                        goto err_parse_key_end;
 
-               if (timeout != set->timeout) {
+               if (timeout != READ_ONCE(set->timeout)) {
                        err = nft_set_ext_add(&tmpl, NFT_SET_EXT_TIMEOUT);
                        if (err < 0)
                                goto err_parse_key_end;
@@ -9031,14 +9110,20 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
                                nft_flow_rule_destroy(nft_trans_flow_rule(trans));
                        break;
                case NFT_MSG_NEWSET:
-                       nft_clear(net, nft_trans_set(trans));
-                       /* This avoids hitting -EBUSY when deleting the table
-                        * from the transaction.
-                        */
-                       if (nft_set_is_anonymous(nft_trans_set(trans)) &&
-                           !list_empty(&nft_trans_set(trans)->bindings))
-                               trans->ctx.table->use--;
+                       if (nft_trans_set_update(trans)) {
+                               struct nft_set *set = nft_trans_set(trans);
 
+                               WRITE_ONCE(set->timeout, nft_trans_set_timeout(trans));
+                               WRITE_ONCE(set->gc_int, nft_trans_set_gc_int(trans));
+                       } else {
+                               nft_clear(net, nft_trans_set(trans));
+                               /* This avoids hitting -EBUSY when deleting the table
+                                * from the transaction.
+                                */
+                               if (nft_set_is_anonymous(nft_trans_set(trans)) &&
+                                   !list_empty(&nft_trans_set(trans)->bindings))
+                                       trans->ctx.table->use--;
+                       }
                        nf_tables_set_notify(&trans->ctx, nft_trans_set(trans),
                                             NFT_MSG_NEWSET, GFP_KERNEL);
                        nft_trans_destroy(trans);
@@ -9260,6 +9345,10 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action)
                        nft_trans_destroy(trans);
                        break;
                case NFT_MSG_NEWSET:
+                       if (nft_trans_set_update(trans)) {
+                               nft_trans_destroy(trans);
+                               break;
+                       }
                        trans->ctx.table->use--;
                        if (nft_trans_set_bound(trans)) {
                                nft_trans_destroy(trans);
index 17b418a5a593af2aa20f89e2a6ddc076488124c7..3a3c7746e88fe6ef0bc443454788bc5650f3b6d9 100644 (file)
@@ -63,7 +63,7 @@ nft_payload_copy_vlan(u32 *d, const struct sk_buff *skb, u8 offset, u8 len)
                        return false;
 
                if (offset + len > VLAN_ETH_HLEN + vlan_hlen)
-                       ethlen -= offset + len - VLAN_ETH_HLEN + vlan_hlen;
+                       ethlen -= offset + len - VLAN_ETH_HLEN - vlan_hlen;
 
                memcpy(dst_u8, vlanh + offset - vlan_hlen, ethlen);
 
index 3364caabef8b1908e264f8462553eef93c825d64..a27e1842b2a09648475759008f46ce085ce78f48 100644 (file)
@@ -157,6 +157,7 @@ static void local_cleanup(struct nfc_llcp_local *local)
        cancel_work_sync(&local->rx_work);
        cancel_work_sync(&local->timeout_work);
        kfree_skb(local->rx_pending);
+       local->rx_pending = NULL;
        del_timer_sync(&local->sdreq_timer);
        cancel_work_sync(&local->sdreq_timeout_work);
        nfc_llcp_free_sdp_tlv_list(&local->pending_sdreqs);
index 9d91087b93992bdf90f1f6a4f8960c3eed77d81b..1fc339084d897b3cedaa779b498a3702bd8962e8 100644 (file)
@@ -1497,6 +1497,7 @@ static int nfc_genl_se_io(struct sk_buff *skb, struct genl_info *info)
        u32 dev_idx, se_idx;
        u8 *apdu;
        size_t apdu_len;
+       int rc;
 
        if (!info->attrs[NFC_ATTR_DEVICE_INDEX] ||
            !info->attrs[NFC_ATTR_SE_INDEX] ||
@@ -1510,25 +1511,37 @@ static int nfc_genl_se_io(struct sk_buff *skb, struct genl_info *info)
        if (!dev)
                return -ENODEV;
 
-       if (!dev->ops || !dev->ops->se_io)
-               return -ENOTSUPP;
+       if (!dev->ops || !dev->ops->se_io) {
+               rc = -EOPNOTSUPP;
+               goto put_dev;
+       }
 
        apdu_len = nla_len(info->attrs[NFC_ATTR_SE_APDU]);
-       if (apdu_len == 0)
-               return -EINVAL;
+       if (apdu_len == 0) {
+               rc = -EINVAL;
+               goto put_dev;
+       }
 
        apdu = nla_data(info->attrs[NFC_ATTR_SE_APDU]);
-       if (!apdu)
-               return -EINVAL;
+       if (!apdu) {
+               rc = -EINVAL;
+               goto put_dev;
+       }
 
        ctx = kzalloc(sizeof(struct se_io_ctx), GFP_KERNEL);
-       if (!ctx)
-               return -ENOMEM;
+       if (!ctx) {
+               rc = -ENOMEM;
+               goto put_dev;
+       }
 
        ctx->dev_idx = dev_idx;
        ctx->se_idx = se_idx;
 
-       return nfc_se_io(dev, se_idx, apdu, apdu_len, se_io_cb, ctx);
+       rc = nfc_se_io(dev, se_idx, apdu, apdu_len, se_io_cb, ctx);
+
+put_dev:
+       nfc_put_device(dev);
+       return rc;
 }
 
 static int nfc_genl_vendor_cmd(struct sk_buff *skb,
@@ -1551,14 +1564,21 @@ static int nfc_genl_vendor_cmd(struct sk_buff *skb,
        subcmd = nla_get_u32(info->attrs[NFC_ATTR_VENDOR_SUBCMD]);
 
        dev = nfc_get_device(dev_idx);
-       if (!dev || !dev->vendor_cmds || !dev->n_vendor_cmds)
+       if (!dev)
                return -ENODEV;
 
+       if (!dev->vendor_cmds || !dev->n_vendor_cmds) {
+               err = -ENODEV;
+               goto put_dev;
+       }
+
        if (info->attrs[NFC_ATTR_VENDOR_DATA]) {
                data = nla_data(info->attrs[NFC_ATTR_VENDOR_DATA]);
                data_len = nla_len(info->attrs[NFC_ATTR_VENDOR_DATA]);
-               if (data_len == 0)
-                       return -EINVAL;
+               if (data_len == 0) {
+                       err = -EINVAL;
+                       goto put_dev;
+               }
        } else {
                data = NULL;
                data_len = 0;
@@ -1573,10 +1593,14 @@ static int nfc_genl_vendor_cmd(struct sk_buff *skb,
                dev->cur_cmd_info = info;
                err = cmd->doit(dev, data, data_len);
                dev->cur_cmd_info = NULL;
-               return err;
+               goto put_dev;
        }
 
-       return -EOPNOTSUPP;
+       err = -EOPNOTSUPP;
+
+put_dev:
+       nfc_put_device(dev);
+       return err;
 }
 
 /* message building helper */
index 9ca721c9fa71853ffb008e24eff80f3bcd6e035c..a71795355aecf5a4f3560f85eb02cb1953c5060f 100644 (file)
@@ -1861,7 +1861,7 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
        vport->upcall_stats = netdev_alloc_pcpu_stats(struct vport_upcall_stats_percpu);
        if (!vport->upcall_stats) {
                err = -ENOMEM;
-               goto err_destroy_portids;
+               goto err_destroy_vport;
        }
 
        err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
@@ -1876,6 +1876,8 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
        ovs_notify(&dp_datapath_genl_family, reply, info);
        return 0;
 
+err_destroy_vport:
+       ovs_dp_detach_port(vport);
 err_destroy_portids:
        kfree(rcu_dereference_raw(dp->upcall_portids));
 err_unlock_and_destroy_meters:
@@ -2323,7 +2325,7 @@ restart:
        vport->upcall_stats = netdev_alloc_pcpu_stats(struct vport_upcall_stats_percpu);
        if (!vport->upcall_stats) {
                err = -ENOMEM;
-               goto exit_unlock_free;
+               goto exit_unlock_free_vport;
        }
 
        err = ovs_vport_cmd_fill_info(vport, reply, genl_info_net(info),
@@ -2343,6 +2345,8 @@ restart:
        ovs_notify(&dp_vport_genl_family, reply, info);
        return 0;
 
+exit_unlock_free_vport:
+       ovs_dp_detach_port(vport);
 exit_unlock_free:
        ovs_unlock();
        kfree_skb(reply);
index e76d3459d78eefbbb873ad79849bf0fb0ffdaf76..ac5caf5a48e1616ab456df7a124a663156bac660 100644 (file)
@@ -10,6 +10,7 @@ rxrpc-y := \
        call_accept.o \
        call_event.o \
        call_object.o \
+       call_state.o \
        conn_client.o \
        conn_event.o \
        conn_object.o \
index 7ea576f6ba4bc8b6a40a0cf5d5767326c0befe6a..ebbd4a1c3f86e411fa740749019dfc8b6567253a 100644 (file)
@@ -155,10 +155,10 @@ static int rxrpc_bind(struct socket *sock, struct sockaddr *saddr, int len)
 
                if (service_id) {
                        write_lock(&local->services_lock);
-                       if (rcu_access_pointer(local->service))
+                       if (local->service)
                                goto service_in_use;
                        rx->local = local;
-                       rcu_assign_pointer(local->service, rx);
+                       local->service = rx;
                        write_unlock(&local->services_lock);
 
                        rx->sk.sk_state = RXRPC_SERVER_BOUND;
@@ -328,7 +328,6 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
                mutex_unlock(&call->user_mutex);
        }
 
-       rxrpc_put_peer(cp.peer, rxrpc_peer_put_discard_tmp);
        _leave(" = %p", call);
        return call;
 }
@@ -374,13 +373,17 @@ EXPORT_SYMBOL(rxrpc_kernel_end_call);
  * @sock: The socket the call is on
  * @call: The call to check
  *
- * Allow a kernel service to find out whether a call is still alive -
- * ie. whether it has completed.
+ * Allow a kernel service to find out whether a call is still alive - whether
+ * it has completed successfully and all received data has been consumed.
  */
 bool rxrpc_kernel_check_life(const struct socket *sock,
                             const struct rxrpc_call *call)
 {
-       return call->state != RXRPC_CALL_COMPLETE;
+       if (!rxrpc_call_is_complete(call))
+               return true;
+       if (call->completion != RXRPC_CALL_SUCCEEDED)
+               return false;
+       return !skb_queue_empty(&call->recvmsg_queue);
 }
 EXPORT_SYMBOL(rxrpc_kernel_check_life);
 
@@ -872,9 +875,9 @@ static int rxrpc_release_sock(struct sock *sk)
 
        sk->sk_state = RXRPC_CLOSE;
 
-       if (rx->local && rcu_access_pointer(rx->local->service) == rx) {
+       if (rx->local && rx->local->service == rx) {
                write_lock(&rx->local->services_lock);
-               rcu_assign_pointer(rx->local->service, NULL);
+               rx->local->service = NULL;
                write_unlock(&rx->local->services_lock);
        }
 
@@ -957,16 +960,9 @@ static const struct net_proto_family rxrpc_family_ops = {
 static int __init af_rxrpc_init(void)
 {
        int ret = -1;
-       unsigned int tmp;
 
        BUILD_BUG_ON(sizeof(struct rxrpc_skb_priv) > sizeof_field(struct sk_buff, cb));
 
-       get_random_bytes(&tmp, sizeof(tmp));
-       tmp &= 0x3fffffff;
-       if (tmp == 0)
-               tmp = 1;
-       idr_set_cursor(&rxrpc_client_conn_ids, tmp);
-
        ret = -ENOMEM;
        rxrpc_call_jar = kmem_cache_create(
                "rxrpc_call_jar", sizeof(struct rxrpc_call), 0,
@@ -1062,7 +1058,6 @@ static void __exit af_rxrpc_exit(void)
         * are released.
         */
        rcu_barrier();
-       rxrpc_destroy_client_conn_ids();
 
        destroy_workqueue(rxrpc_workqueue);
        rxrpc_exit_security();
index 18092526d3c8291831bd1b9438d4f713eee48f10..433060cade0381352d387da24df723b4b4565d27 100644 (file)
@@ -38,6 +38,7 @@ struct rxrpc_txbuf;
 enum rxrpc_skb_mark {
        RXRPC_SKB_MARK_PACKET,          /* Received packet */
        RXRPC_SKB_MARK_ERROR,           /* Error notification */
+       RXRPC_SKB_MARK_SERVICE_CONN_SECURED, /* Service connection response has been verified */
        RXRPC_SKB_MARK_REJECT_BUSY,     /* Reject with BUSY */
        RXRPC_SKB_MARK_REJECT_ABORT,    /* Reject with ABORT (code in skb->priority) */
 };
@@ -75,13 +76,7 @@ struct rxrpc_net {
 
        bool                    live;
 
-       bool                    kill_all_client_conns;
        atomic_t                nr_client_conns;
-       spinlock_t              client_conn_cache_lock; /* Lock for ->*_client_conns */
-       struct mutex            client_conn_discard_lock; /* Prevent multiple discarders */
-       struct list_head        idle_client_conns;
-       struct work_struct      client_conn_reaper;
-       struct timer_list       client_conn_reap_timer;
 
        struct hlist_head       local_endpoints;
        struct mutex            local_mutex;    /* Lock for ->local_endpoints */
@@ -202,6 +197,7 @@ struct rxrpc_host_header {
  * - max 48 bytes (struct sk_buff::cb)
  */
 struct rxrpc_skb_priv {
+       struct rxrpc_connection *conn;  /* Connection referred to (poke packet) */
        u16             offset;         /* Offset of data */
        u16             len;            /* Length of data */
        u8              flags;
@@ -262,13 +258,11 @@ struct rxrpc_security {
 
        /* respond to a challenge */
        int (*respond_to_challenge)(struct rxrpc_connection *,
-                                   struct sk_buff *,
-                                   u32 *);
+                                   struct sk_buff *);
 
        /* verify a response */
        int (*verify_response)(struct rxrpc_connection *,
-                              struct sk_buff *,
-                              u32 *);
+                              struct sk_buff *);
 
        /* clear connection security */
        void (*clear)(struct rxrpc_connection *);
@@ -283,22 +277,34 @@ struct rxrpc_local {
        struct rcu_head         rcu;
        atomic_t                active_users;   /* Number of users of the local endpoint */
        refcount_t              ref;            /* Number of references to the structure */
-       struct rxrpc_net        *rxnet;         /* The network ns in which this resides */
+       struct net              *net;           /* The network namespace */
+       struct rxrpc_net        *rxnet;         /* Our bits in the network namespace */
        struct hlist_node       link;
        struct socket           *socket;        /* my UDP socket */
        struct task_struct      *io_thread;
        struct completion       io_thread_ready; /* Indication that the I/O thread started */
-       struct rxrpc_sock __rcu *service;       /* Service(s) listening on this endpoint */
+       struct rxrpc_sock       *service;       /* Service(s) listening on this endpoint */
        struct rw_semaphore     defrag_sem;     /* control re-enablement of IP DF bit */
        struct sk_buff_head     rx_queue;       /* Received packets */
+       struct list_head        conn_attend_q;  /* Conns requiring immediate attention */
        struct list_head        call_attend_q;  /* Calls requiring immediate attention */
+
        struct rb_root          client_bundles; /* Client connection bundles by socket params */
        spinlock_t              client_bundles_lock; /* Lock for client_bundles */
+       bool                    kill_all_client_conns;
+       struct list_head        idle_client_conns;
+       struct timer_list       client_conn_reap_timer;
+       unsigned long           client_conn_flags;
+#define RXRPC_CLIENT_CONN_REAP_TIMER   0       /* The client conn reap timer expired */
+
        spinlock_t              lock;           /* access lock */
        rwlock_t                services_lock;  /* lock for services list */
        int                     debug_id;       /* debug ID for printks */
        bool                    dead;
        bool                    service_closed; /* Service socket closed */
+       struct idr              conn_ids;       /* List of connection IDs */
+       struct list_head        new_client_calls; /* Newly created client calls need connection */
+       spinlock_t              client_call_lock; /* Lock for ->new_client_calls */
        struct sockaddr_rxrpc   srx;            /* local address */
 };
 
@@ -356,7 +362,6 @@ struct rxrpc_conn_proto {
 
 struct rxrpc_conn_parameters {
        struct rxrpc_local      *local;         /* Representation of local endpoint */
-       struct rxrpc_peer       *peer;          /* Remote endpoint */
        struct key              *key;           /* Security details */
        bool                    exclusive;      /* T if conn is exclusive */
        bool                    upgrade;        /* T if service ID can be upgraded */
@@ -364,11 +369,22 @@ struct rxrpc_conn_parameters {
        u32                     security_level; /* Security level selected */
 };
 
+/*
+ * Call completion condition (state == RXRPC_CALL_COMPLETE).
+ */
+enum rxrpc_call_completion {
+       RXRPC_CALL_SUCCEEDED,           /* - Normal termination */
+       RXRPC_CALL_REMOTELY_ABORTED,    /* - call aborted by peer */
+       RXRPC_CALL_LOCALLY_ABORTED,     /* - call aborted locally on error or close */
+       RXRPC_CALL_LOCAL_ERROR,         /* - call failed due to local error */
+       RXRPC_CALL_NETWORK_ERROR,       /* - call terminated by network error */
+       NR__RXRPC_CALL_COMPLETIONS
+};
+
 /*
  * Bits in the connection flags.
  */
 enum rxrpc_conn_flag {
-       RXRPC_CONN_HAS_IDR,             /* Has a client conn ID assigned */
        RXRPC_CONN_IN_SERVICE_CONNS,    /* Conn is in peer->service_conns */
        RXRPC_CONN_DONT_REUSE,          /* Don't reuse this connection */
        RXRPC_CONN_PROBING_FOR_UPGRADE, /* Probing for service upgrade */
@@ -388,6 +404,7 @@ enum rxrpc_conn_flag {
  */
 enum rxrpc_conn_event {
        RXRPC_CONN_EV_CHALLENGE,        /* Send challenge packet */
+       RXRPC_CONN_EV_ABORT_CALLS,      /* Abort attached calls */
 };
 
 /*
@@ -395,13 +412,13 @@ enum rxrpc_conn_event {
  */
 enum rxrpc_conn_proto_state {
        RXRPC_CONN_UNUSED,              /* Connection not yet attempted */
+       RXRPC_CONN_CLIENT_UNSECURED,    /* Client connection needs security init */
        RXRPC_CONN_CLIENT,              /* Client connection */
        RXRPC_CONN_SERVICE_PREALLOC,    /* Service connection preallocation */
        RXRPC_CONN_SERVICE_UNSECURED,   /* Service unsecured connection */
        RXRPC_CONN_SERVICE_CHALLENGING, /* Service challenging for security */
        RXRPC_CONN_SERVICE,             /* Service secured connection */
-       RXRPC_CONN_REMOTELY_ABORTED,    /* Conn aborted by peer */
-       RXRPC_CONN_LOCALLY_ABORTED,     /* Conn aborted locally */
+       RXRPC_CONN_ABORTED,             /* Conn aborted */
        RXRPC_CONN__NR_STATES
 };
 
@@ -412,17 +429,16 @@ struct rxrpc_bundle {
        struct rxrpc_local      *local;         /* Representation of local endpoint */
        struct rxrpc_peer       *peer;          /* Remote endpoint */
        struct key              *key;           /* Security details */
+       const struct rxrpc_security *security;  /* applied security module */
        refcount_t              ref;
        atomic_t                active;         /* Number of active users */
        unsigned int            debug_id;
        u32                     security_level; /* Security level selected */
        u16                     service_id;     /* Service ID for this connection */
        bool                    try_upgrade;    /* True if the bundle is attempting upgrade */
-       bool                    alloc_conn;     /* True if someone's getting a conn */
        bool                    exclusive;      /* T if conn is exclusive */
        bool                    upgrade;        /* T if service ID can be upgraded */
-       short                   alloc_error;    /* Error from last conn allocation */
-       spinlock_t              channel_lock;
+       unsigned short          alloc_error;    /* Error from last conn allocation */
        struct rb_node          local_node;     /* Node in local->client_conns */
        struct list_head        waiting_calls;  /* Calls waiting for channels */
        unsigned long           avail_chans;    /* Mask of available channels */
@@ -440,6 +456,7 @@ struct rxrpc_connection {
        struct rxrpc_peer       *peer;          /* Remote endpoint */
        struct rxrpc_net        *rxnet;         /* Network namespace to which call belongs */
        struct key              *key;           /* Security details */
+       struct list_head        attend_link;    /* Link in local->conn_attend_q */
 
        refcount_t              ref;
        atomic_t                active;         /* Active count for service conns */
@@ -449,7 +466,7 @@ struct rxrpc_connection {
        unsigned char           act_chans;      /* Mask of active channels */
        struct rxrpc_channel {
                unsigned long           final_ack_at;   /* Time at which to issue final ACK */
-               struct rxrpc_call __rcu *call;          /* Active call */
+               struct rxrpc_call       *call;          /* Active call */
                unsigned int            call_debug_id;  /* call->debug_id */
                u32                     call_id;        /* ID of current call */
                u32                     call_counter;   /* Call ID counter */
@@ -470,6 +487,7 @@ struct rxrpc_connection {
        struct list_head        link;           /* link in master connection list */
        struct sk_buff_head     rx_queue;       /* received conn-level packets */
 
+       struct mutex            security_lock;  /* Lock for security management */
        const struct rxrpc_security *security;  /* applied security module */
        union {
                struct {
@@ -483,7 +501,8 @@ struct rxrpc_connection {
        unsigned long           idle_timestamp; /* Time at which last became idle */
        spinlock_t              state_lock;     /* state-change lock */
        enum rxrpc_conn_proto_state state;      /* current state of connection */
-       u32                     abort_code;     /* Abort code of connection abort */
+       enum rxrpc_call_completion completion;  /* Completion condition */
+       s32                     abort_code;     /* Abort code of connection abort */
        int                     debug_id;       /* debug ID for printks */
        atomic_t                serial;         /* packet serial number counter */
        unsigned int            hi_serial;      /* highest serial number received */
@@ -527,7 +546,8 @@ enum rxrpc_call_flag {
        RXRPC_CALL_KERNEL,              /* The call was made by the kernel */
        RXRPC_CALL_UPGRADE,             /* Service upgrade was requested for the call */
        RXRPC_CALL_EXCLUSIVE,           /* The call uses a once-only connection */
-       RXRPC_CALL_RX_IS_IDLE,          /* Reception is idle - send an ACK */
+       RXRPC_CALL_RX_IS_IDLE,          /* recvmsg() is idle - send an ACK */
+       RXRPC_CALL_RECVMSG_READ_ALL,    /* recvmsg() read all of the received data */
 };
 
 /*
@@ -557,18 +577,6 @@ enum rxrpc_call_state {
        NR__RXRPC_CALL_STATES
 };
 
-/*
- * Call completion condition (state == RXRPC_CALL_COMPLETE).
- */
-enum rxrpc_call_completion {
-       RXRPC_CALL_SUCCEEDED,           /* - Normal termination */
-       RXRPC_CALL_REMOTELY_ABORTED,    /* - call aborted by peer */
-       RXRPC_CALL_LOCALLY_ABORTED,     /* - call aborted locally on error or close */
-       RXRPC_CALL_LOCAL_ERROR,         /* - call failed due to local error */
-       RXRPC_CALL_NETWORK_ERROR,       /* - call terminated by network error */
-       NR__RXRPC_CALL_COMPLETIONS
-};
-
 /*
  * Call Tx congestion management modes.
  */
@@ -587,6 +595,7 @@ enum rxrpc_congest_mode {
 struct rxrpc_call {
        struct rcu_head         rcu;
        struct rxrpc_connection *conn;          /* connection carrying call */
+       struct rxrpc_bundle     *bundle;        /* Connection bundle to use */
        struct rxrpc_peer       *peer;          /* Peer record for remote address */
        struct rxrpc_local      *local;         /* Representation of local endpoint */
        struct rxrpc_sock __rcu *socket;        /* socket responsible */
@@ -609,7 +618,7 @@ struct rxrpc_call {
        struct work_struct      destroyer;      /* In-process-context destroyer */
        rxrpc_notify_rx_t       notify_rx;      /* kernel service Rx notification function */
        struct list_head        link;           /* link in master call list */
-       struct list_head        chan_wait_link; /* Link in conn->bundle->waiting_calls */
+       struct list_head        wait_link;      /* Link in local->new_client_calls */
        struct hlist_node       error_link;     /* link in error distribution list */
        struct list_head        accept_link;    /* Link in rx->acceptq */
        struct list_head        recvmsg_link;   /* Link in rx->recvmsg_q */
@@ -623,10 +632,13 @@ struct rxrpc_call {
        unsigned long           flags;
        unsigned long           events;
        spinlock_t              notify_lock;    /* Kernel notification lock */
-       rwlock_t                state_lock;     /* lock for state transition */
-       u32                     abort_code;     /* Local/remote abort code */
+       unsigned int            send_abort_why; /* Why the abort [enum rxrpc_abort_reason] */
+       s32                     send_abort;     /* Abort code to be sent */
+       short                   send_abort_err; /* Error to be associated with the abort */
+       rxrpc_seq_t             send_abort_seq; /* DATA packet that incurred the abort (or 0) */
+       s32                     abort_code;     /* Local/remote abort code */
        int                     error;          /* Local error incurred */
-       enum rxrpc_call_state   state;          /* current state of call */
+       enum rxrpc_call_state   _state;         /* Current state of call (needs barrier) */
        enum rxrpc_call_completion completion;  /* Call completion condition */
        refcount_t              ref;
        u8                      security_ix;    /* Security type */
@@ -812,9 +824,11 @@ extern struct workqueue_struct *rxrpc_workqueue;
  */
 int rxrpc_service_prealloc(struct rxrpc_sock *, gfp_t);
 void rxrpc_discard_prealloc(struct rxrpc_sock *);
-int rxrpc_new_incoming_call(struct rxrpc_local *, struct rxrpc_peer *,
-                           struct rxrpc_connection *, struct sockaddr_rxrpc *,
-                           struct sk_buff *);
+bool rxrpc_new_incoming_call(struct rxrpc_local *local,
+                            struct rxrpc_peer *peer,
+                            struct rxrpc_connection *conn,
+                            struct sockaddr_rxrpc *peer_srx,
+                            struct sk_buff *skb);
 void rxrpc_accept_incoming_calls(struct rxrpc_local *);
 int rxrpc_user_charge_accept(struct rxrpc_sock *, unsigned long);
 
@@ -834,7 +848,7 @@ void rxrpc_reduce_call_timer(struct rxrpc_call *call,
                             unsigned long now,
                             enum rxrpc_timer_trace why);
 
-void rxrpc_input_call_event(struct rxrpc_call *call, struct sk_buff *skb);
+bool rxrpc_input_call_event(struct rxrpc_call *call, struct sk_buff *skb);
 
 /*
  * call_object.c
@@ -851,6 +865,7 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *,
                                         struct sockaddr_rxrpc *,
                                         struct rxrpc_call_params *, gfp_t,
                                         unsigned int);
+void rxrpc_start_call_timer(struct rxrpc_call *call);
 void rxrpc_incoming_call(struct rxrpc_sock *, struct rxrpc_call *,
                         struct sk_buff *);
 void rxrpc_release_call(struct rxrpc_sock *, struct rxrpc_call *);
@@ -872,33 +887,89 @@ static inline bool rxrpc_is_client_call(const struct rxrpc_call *call)
        return !rxrpc_is_service_call(call);
 }
 
+/*
+ * call_state.c
+ */
+bool rxrpc_set_call_completion(struct rxrpc_call *call,
+                              enum rxrpc_call_completion compl,
+                              u32 abort_code,
+                              int error);
+bool rxrpc_call_completed(struct rxrpc_call *call);
+bool rxrpc_abort_call(struct rxrpc_call *call, rxrpc_seq_t seq,
+                     u32 abort_code, int error, enum rxrpc_abort_reason why);
+void rxrpc_prefail_call(struct rxrpc_call *call, enum rxrpc_call_completion compl,
+                       int error);
+
+static inline void rxrpc_set_call_state(struct rxrpc_call *call,
+                                       enum rxrpc_call_state state)
+{
+       /* Order write of completion info before write of ->state. */
+       smp_store_release(&call->_state, state);
+       wake_up(&call->waitq);
+}
+
+static inline enum rxrpc_call_state __rxrpc_call_state(const struct rxrpc_call *call)
+{
+       return call->_state; /* Only inside I/O thread */
+}
+
+static inline bool __rxrpc_call_is_complete(const struct rxrpc_call *call)
+{
+       return __rxrpc_call_state(call) == RXRPC_CALL_COMPLETE;
+}
+
+static inline enum rxrpc_call_state rxrpc_call_state(const struct rxrpc_call *call)
+{
+       /* Order read ->state before read of completion info. */
+       return smp_load_acquire(&call->_state);
+}
+
+static inline bool rxrpc_call_is_complete(const struct rxrpc_call *call)
+{
+       return rxrpc_call_state(call) == RXRPC_CALL_COMPLETE;
+}
+
+static inline bool rxrpc_call_has_failed(const struct rxrpc_call *call)
+{
+       return rxrpc_call_is_complete(call) && call->completion != RXRPC_CALL_SUCCEEDED;
+}
+
 /*
  * conn_client.c
  */
 extern unsigned int rxrpc_reap_client_connections;
 extern unsigned long rxrpc_conn_idle_client_expiry;
 extern unsigned long rxrpc_conn_idle_client_fast_expiry;
-extern struct idr rxrpc_client_conn_ids;
 
-void rxrpc_destroy_client_conn_ids(void);
+void rxrpc_purge_client_connections(struct rxrpc_local *local);
 struct rxrpc_bundle *rxrpc_get_bundle(struct rxrpc_bundle *, enum rxrpc_bundle_trace);
 void rxrpc_put_bundle(struct rxrpc_bundle *, enum rxrpc_bundle_trace);
-int rxrpc_connect_call(struct rxrpc_sock *, struct rxrpc_call *,
-                      struct rxrpc_conn_parameters *, struct sockaddr_rxrpc *,
-                      gfp_t);
+int rxrpc_look_up_bundle(struct rxrpc_call *call, gfp_t gfp);
+void rxrpc_connect_client_calls(struct rxrpc_local *local);
 void rxrpc_expose_client_call(struct rxrpc_call *);
 void rxrpc_disconnect_client_call(struct rxrpc_bundle *, struct rxrpc_call *);
+void rxrpc_deactivate_bundle(struct rxrpc_bundle *bundle);
 void rxrpc_put_client_conn(struct rxrpc_connection *, enum rxrpc_conn_trace);
-void rxrpc_discard_expired_client_conns(struct work_struct *);
-void rxrpc_destroy_all_client_connections(struct rxrpc_net *);
+void rxrpc_discard_expired_client_conns(struct rxrpc_local *local);
 void rxrpc_clean_up_local_conns(struct rxrpc_local *);
 
 /*
  * conn_event.c
  */
+void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn, struct sk_buff *skb,
+                               unsigned int channel);
+int rxrpc_abort_conn(struct rxrpc_connection *conn, struct sk_buff *skb,
+                    s32 abort_code, int err, enum rxrpc_abort_reason why);
 void rxrpc_process_connection(struct work_struct *);
 void rxrpc_process_delayed_final_acks(struct rxrpc_connection *, bool);
-int rxrpc_input_conn_packet(struct rxrpc_connection *conn, struct sk_buff *skb);
+bool rxrpc_input_conn_packet(struct rxrpc_connection *conn, struct sk_buff *skb);
+void rxrpc_input_conn_event(struct rxrpc_connection *conn, struct sk_buff *skb);
+
+static inline bool rxrpc_is_conn_aborted(const struct rxrpc_connection *conn)
+{
+       /* Order reading the abort info after the state check. */
+       return smp_load_acquire(&conn->state) == RXRPC_CONN_ABORTED;
+}
 
 /*
  * conn_object.c
@@ -906,6 +977,7 @@ int rxrpc_input_conn_packet(struct rxrpc_connection *conn, struct sk_buff *skb);
 extern unsigned int rxrpc_connection_expiry;
 extern unsigned int rxrpc_closed_conn_expiry;
 
+void rxrpc_poke_conn(struct rxrpc_connection *conn, enum rxrpc_conn_trace why);
 struct rxrpc_connection *rxrpc_alloc_connection(struct rxrpc_net *, gfp_t);
 struct rxrpc_connection *rxrpc_find_client_connection_rcu(struct rxrpc_local *,
                                                          struct sockaddr_rxrpc *,
@@ -961,12 +1033,19 @@ void rxrpc_implicit_end_call(struct rxrpc_call *, struct sk_buff *);
  */
 int rxrpc_encap_rcv(struct sock *, struct sk_buff *);
 void rxrpc_error_report(struct sock *);
+bool rxrpc_direct_abort(struct sk_buff *skb, enum rxrpc_abort_reason why,
+                       s32 abort_code, int err);
 int rxrpc_io_thread(void *data);
 static inline void rxrpc_wake_up_io_thread(struct rxrpc_local *local)
 {
        wake_up_process(local->io_thread);
 }
 
+static inline bool rxrpc_protocol_error(struct sk_buff *skb, enum rxrpc_abort_reason why)
+{
+       return rxrpc_direct_abort(skb, why, RX_PROTOCOL_ERROR, -EPROTO);
+}
+
 /*
  * insecure.c
  */
@@ -1048,6 +1127,7 @@ static inline struct rxrpc_net *rxrpc_net(struct net *net)
 int rxrpc_send_ack_packet(struct rxrpc_call *call, struct rxrpc_txbuf *txb);
 int rxrpc_send_abort_packet(struct rxrpc_call *);
 int rxrpc_send_data_packet(struct rxrpc_call *, struct rxrpc_txbuf *);
+void rxrpc_send_conn_abort(struct rxrpc_connection *conn);
 void rxrpc_reject_packet(struct rxrpc_local *local, struct sk_buff *skb);
 void rxrpc_send_keepalive(struct rxrpc_peer *);
 void rxrpc_transmit_one(struct rxrpc_call *call, struct rxrpc_txbuf *txb);
@@ -1063,12 +1143,11 @@ void rxrpc_peer_keepalive_worker(struct work_struct *);
  */
 struct rxrpc_peer *rxrpc_lookup_peer_rcu(struct rxrpc_local *,
                                         const struct sockaddr_rxrpc *);
-struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_sock *, struct rxrpc_local *,
-                                    struct sockaddr_rxrpc *, gfp_t);
+struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *local,
+                                    struct sockaddr_rxrpc *srx, gfp_t gfp);
 struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *, gfp_t,
                                    enum rxrpc_peer_trace);
-void rxrpc_new_incoming_peer(struct rxrpc_sock *, struct rxrpc_local *,
-                            struct rxrpc_peer *);
+void rxrpc_new_incoming_peer(struct rxrpc_local *local, struct rxrpc_peer *peer);
 void rxrpc_destroy_all_peers(struct rxrpc_net *);
 struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *, enum rxrpc_peer_trace);
 struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *, enum rxrpc_peer_trace);
@@ -1086,33 +1165,22 @@ extern const struct seq_operations rxrpc_local_seq_ops;
  * recvmsg.c
  */
 void rxrpc_notify_socket(struct rxrpc_call *);
-bool __rxrpc_set_call_completion(struct rxrpc_call *, enum rxrpc_call_completion, u32, int);
-bool rxrpc_set_call_completion(struct rxrpc_call *, enum rxrpc_call_completion, u32, int);
-bool __rxrpc_call_completed(struct rxrpc_call *);
-bool rxrpc_call_completed(struct rxrpc_call *);
-bool __rxrpc_abort_call(const char *, struct rxrpc_call *, rxrpc_seq_t, u32, int);
-bool rxrpc_abort_call(const char *, struct rxrpc_call *, rxrpc_seq_t, u32, int);
 int rxrpc_recvmsg(struct socket *, struct msghdr *, size_t, int);
 
 /*
  * Abort a call due to a protocol error.
  */
-static inline bool __rxrpc_abort_eproto(struct rxrpc_call *call,
-                                       struct sk_buff *skb,
-                                       const char *eproto_why,
-                                       const char *why,
-                                       u32 abort_code)
+static inline int rxrpc_abort_eproto(struct rxrpc_call *call,
+                                    struct sk_buff *skb,
+                                    s32 abort_code,
+                                    enum rxrpc_abort_reason why)
 {
        struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
 
-       trace_rxrpc_rx_eproto(call, sp->hdr.serial, eproto_why);
-       return rxrpc_abort_call(why, call, sp->hdr.seq, abort_code, -EPROTO);
+       rxrpc_abort_call(call, sp->hdr.seq, abort_code, -EPROTO, why);
+       return -EPROTO;
 }
 
-#define rxrpc_abort_eproto(call, skb, eproto_why, abort_why, abort_code) \
-       __rxrpc_abort_eproto((call), (skb), tracepoint_string(eproto_why), \
-                            (abort_why), (abort_code))
-
 /*
  * rtt.c
  */
@@ -1144,6 +1212,8 @@ struct key *rxrpc_look_up_server_security(struct rxrpc_connection *,
 /*
  * sendmsg.c
  */
+bool rxrpc_propose_abort(struct rxrpc_call *call, s32 abort_code, int error,
+                        enum rxrpc_abort_reason why);
 int rxrpc_do_sendmsg(struct rxrpc_sock *, struct msghdr *, size_t);
 
 /*
index c02401656fa981dad5ef2eefb3c49a3fdda8b4c2..3e8689fdc437146fee49fc611d4fdec2530b18b6 100644 (file)
@@ -99,7 +99,7 @@ static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
        if (!call)
                return -ENOMEM;
        call->flags |= (1 << RXRPC_CALL_IS_SERVICE);
-       call->state = RXRPC_CALL_SERVER_PREALLOC;
+       rxrpc_set_call_state(call, RXRPC_CALL_SERVER_PREALLOC);
        __set_bit(RXRPC_CALL_EV_INITIAL_PING, &call->events);
 
        trace_rxrpc_call(call->debug_id, refcount_read(&call->ref),
@@ -280,7 +280,7 @@ static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
                                          (peer_tail + 1) &
                                          (RXRPC_BACKLOG_MAX - 1));
 
-                       rxrpc_new_incoming_peer(rx, local, peer);
+                       rxrpc_new_incoming_peer(local, peer);
                }
 
                /* Now allocate and set up the connection */
@@ -326,11 +326,11 @@ static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
  * If we want to report an error, we mark the skb with the packet type and
  * abort code and return false.
  */
-int rxrpc_new_incoming_call(struct rxrpc_local *local,
-                           struct rxrpc_peer *peer,
-                           struct rxrpc_connection *conn,
-                           struct sockaddr_rxrpc *peer_srx,
-                           struct sk_buff *skb)
+bool rxrpc_new_incoming_call(struct rxrpc_local *local,
+                            struct rxrpc_peer *peer,
+                            struct rxrpc_connection *conn,
+                            struct sockaddr_rxrpc *peer_srx,
+                            struct sk_buff *skb)
 {
        const struct rxrpc_security *sec = NULL;
        struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
@@ -339,18 +339,17 @@ int rxrpc_new_incoming_call(struct rxrpc_local *local,
 
        _enter("");
 
-       /* Don't set up a call for anything other than the first DATA packet. */
-       if (sp->hdr.seq != 1 ||
-           sp->hdr.type != RXRPC_PACKET_TYPE_DATA)
-               return 0; /* Just discard */
+       /* Don't set up a call for anything other than a DATA packet. */
+       if (sp->hdr.type != RXRPC_PACKET_TYPE_DATA)
+               return rxrpc_protocol_error(skb, rxrpc_eproto_no_service_call);
 
-       rcu_read_lock();
+       read_lock(&local->services_lock);
 
        /* Weed out packets to services we're not offering.  Packets that would
         * begin a call are explicitly rejected and the rest are just
         * discarded.
         */
-       rx = rcu_dereference(local->service);
+       rx = local->service;
        if (!rx || (sp->hdr.serviceId != rx->srx.srx_service &&
                    sp->hdr.serviceId != rx->second_service)
            ) {
@@ -363,16 +362,14 @@ int rxrpc_new_incoming_call(struct rxrpc_local *local,
        if (!conn) {
                sec = rxrpc_get_incoming_security(rx, skb);
                if (!sec)
-                       goto reject;
+                       goto unsupported_security;
        }
 
        spin_lock(&rx->incoming_lock);
        if (rx->sk.sk_state == RXRPC_SERVER_LISTEN_DISABLED ||
            rx->sk.sk_state == RXRPC_CLOSE) {
-               trace_rxrpc_abort(0, "CLS", sp->hdr.cid, sp->hdr.callNumber,
-                                 sp->hdr.seq, RX_INVALID_OPERATION, ESHUTDOWN);
-               skb->mark = RXRPC_SKB_MARK_REJECT_ABORT;
-               skb->priority = RX_INVALID_OPERATION;
+               rxrpc_direct_abort(skb, rxrpc_abort_shut_down,
+                                  RX_INVALID_OPERATION, -ESHUTDOWN);
                goto no_call;
        }
 
@@ -402,7 +399,7 @@ int rxrpc_new_incoming_call(struct rxrpc_local *local,
        spin_unlock(&conn->state_lock);
 
        spin_unlock(&rx->incoming_lock);
-       rcu_read_unlock();
+       read_unlock(&local->services_lock);
 
        if (hlist_unhashed(&call->error_link)) {
                spin_lock(&call->peer->lock);
@@ -413,22 +410,24 @@ int rxrpc_new_incoming_call(struct rxrpc_local *local,
        _leave(" = %p{%d}", call, call->debug_id);
        rxrpc_input_call_event(call, skb);
        rxrpc_put_call(call, rxrpc_call_put_input);
-       return 0;
+       return true;
 
 unsupported_service:
-       trace_rxrpc_abort(0, "INV", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
-                         RX_INVALID_OPERATION, EOPNOTSUPP);
-       skb->priority = RX_INVALID_OPERATION;
-       goto reject;
+       read_unlock(&local->services_lock);
+       return rxrpc_direct_abort(skb, rxrpc_abort_service_not_offered,
+                                 RX_INVALID_OPERATION, -EOPNOTSUPP);
+unsupported_security:
+       read_unlock(&local->services_lock);
+       return rxrpc_direct_abort(skb, rxrpc_abort_service_not_offered,
+                                 RX_INVALID_OPERATION, -EKEYREJECTED);
 no_call:
        spin_unlock(&rx->incoming_lock);
-reject:
-       rcu_read_unlock();
+       read_unlock(&local->services_lock);
        _leave(" = f [%u]", skb->mark);
-       return -EPROTO;
+       return false;
 discard:
-       rcu_read_unlock();
-       return 0;
+       read_unlock(&local->services_lock);
+       return true;
 }
 
 /*
index b2cf448fb02c0dcba93324754455955d287d16cf..1abdef15debce229cdc0ae93c94f463efc44d739 100644 (file)
@@ -251,6 +251,41 @@ out:
        _leave("");
 }
 
+/*
+ * Start transmitting the reply to a service.  This cancels the need to ACK the
+ * request if we haven't yet done so.
+ */
+static void rxrpc_begin_service_reply(struct rxrpc_call *call)
+{
+       unsigned long now = jiffies;
+
+       rxrpc_set_call_state(call, RXRPC_CALL_SERVER_SEND_REPLY);
+       WRITE_ONCE(call->delay_ack_at, now + MAX_JIFFY_OFFSET);
+       if (call->ackr_reason == RXRPC_ACK_DELAY)
+               call->ackr_reason = 0;
+       trace_rxrpc_timer(call, rxrpc_timer_init_for_send_reply, now);
+}
+
+/*
+ * Close the transmission phase.  After this point there is no more data to be
+ * transmitted in the call.
+ */
+static void rxrpc_close_tx_phase(struct rxrpc_call *call)
+{
+       _debug("________awaiting reply/ACK__________");
+
+       switch (__rxrpc_call_state(call)) {
+       case RXRPC_CALL_CLIENT_SEND_REQUEST:
+               rxrpc_set_call_state(call, RXRPC_CALL_CLIENT_AWAIT_REPLY);
+               break;
+       case RXRPC_CALL_SERVER_SEND_REPLY:
+               rxrpc_set_call_state(call, RXRPC_CALL_SERVER_AWAIT_ACK);
+               break;
+       default:
+               break;
+       }
+}
+
 static bool rxrpc_tx_window_has_space(struct rxrpc_call *call)
 {
        unsigned int winsize = min_t(unsigned int, call->tx_winsize,
@@ -270,9 +305,11 @@ static void rxrpc_decant_prepared_tx(struct rxrpc_call *call)
 {
        struct rxrpc_txbuf *txb;
 
-       if (rxrpc_is_client_call(call) &&
-           !test_bit(RXRPC_CALL_EXPOSED, &call->flags))
+       if (!test_bit(RXRPC_CALL_EXPOSED, &call->flags)) {
+               if (list_empty(&call->tx_sendmsg))
+                       return;
                rxrpc_expose_client_call(call);
+       }
 
        while ((txb = list_first_entry_or_null(&call->tx_sendmsg,
                                               struct rxrpc_txbuf, call_link))) {
@@ -283,6 +320,9 @@ static void rxrpc_decant_prepared_tx(struct rxrpc_call *call)
                call->tx_top = txb->seq;
                list_add_tail(&txb->call_link, &call->tx_buffer);
 
+               if (txb->wire.flags & RXRPC_LAST_PACKET)
+                       rxrpc_close_tx_phase(call);
+
                rxrpc_transmit_one(call, txb);
 
                if (!rxrpc_tx_window_has_space(call))
@@ -292,16 +332,15 @@ static void rxrpc_decant_prepared_tx(struct rxrpc_call *call)
 
 static void rxrpc_transmit_some_data(struct rxrpc_call *call)
 {
-       switch (call->state) {
+       switch (__rxrpc_call_state(call)) {
        case RXRPC_CALL_SERVER_ACK_REQUEST:
                if (list_empty(&call->tx_sendmsg))
                        return;
+               rxrpc_begin_service_reply(call);
                fallthrough;
 
        case RXRPC_CALL_SERVER_SEND_REPLY:
-       case RXRPC_CALL_SERVER_AWAIT_ACK:
        case RXRPC_CALL_CLIENT_SEND_REQUEST:
-       case RXRPC_CALL_CLIENT_AWAIT_REPLY:
                if (!rxrpc_tx_window_has_space(call))
                        return;
                if (list_empty(&call->tx_sendmsg)) {
@@ -331,21 +370,31 @@ static void rxrpc_send_initial_ping(struct rxrpc_call *call)
 /*
  * Handle retransmission and deferred ACK/abort generation.
  */
-void rxrpc_input_call_event(struct rxrpc_call *call, struct sk_buff *skb)
+bool rxrpc_input_call_event(struct rxrpc_call *call, struct sk_buff *skb)
 {
        unsigned long now, next, t;
        rxrpc_serial_t ackr_serial;
        bool resend = false, expired = false;
+       s32 abort_code;
 
        rxrpc_see_call(call, rxrpc_call_see_input);
 
        //printk("\n--------------------\n");
        _enter("{%d,%s,%lx}",
-              call->debug_id, rxrpc_call_states[call->state], call->events);
+              call->debug_id, rxrpc_call_states[__rxrpc_call_state(call)],
+              call->events);
 
-       if (call->state == RXRPC_CALL_COMPLETE)
+       if (__rxrpc_call_is_complete(call))
                goto out;
 
+       /* Handle abort request locklessly, vs rxrpc_propose_abort(). */
+       abort_code = smp_load_acquire(&call->send_abort);
+       if (abort_code) {
+               rxrpc_abort_call(call, 0, call->send_abort, call->send_abort_err,
+                                call->send_abort_why);
+               goto out;
+       }
+
        if (skb && skb->mark == RXRPC_SKB_MARK_ERROR)
                goto out;
 
@@ -358,7 +407,7 @@ void rxrpc_input_call_event(struct rxrpc_call *call, struct sk_buff *skb)
        }
 
        t = READ_ONCE(call->expect_req_by);
-       if (call->state == RXRPC_CALL_SERVER_RECV_REQUEST &&
+       if (__rxrpc_call_state(call) == RXRPC_CALL_SERVER_RECV_REQUEST &&
            time_after_eq(now, t)) {
                trace_rxrpc_timer(call, rxrpc_timer_exp_idle, now);
                expired = true;
@@ -429,11 +478,12 @@ void rxrpc_input_call_event(struct rxrpc_call *call, struct sk_buff *skb)
                if (test_bit(RXRPC_CALL_RX_HEARD, &call->flags) &&
                    (int)call->conn->hi_serial - (int)call->rx_serial > 0) {
                        trace_rxrpc_call_reset(call);
-                       rxrpc_abort_call("EXP", call, 0, RX_CALL_DEAD, -ECONNRESET);
+                       rxrpc_abort_call(call, 0, RX_CALL_DEAD, -ECONNRESET,
+                                        rxrpc_abort_call_reset);
                } else {
-                       rxrpc_abort_call("EXP", call, 0, RX_CALL_TIMEOUT, -ETIME);
+                       rxrpc_abort_call(call, 0, RX_CALL_TIMEOUT, -ETIME,
+                                        rxrpc_abort_call_timeout);
                }
-               rxrpc_send_abort_packet(call);
                goto out;
        }
 
@@ -441,7 +491,7 @@ void rxrpc_input_call_event(struct rxrpc_call *call, struct sk_buff *skb)
                rxrpc_send_ACK(call, RXRPC_ACK_PING, 0,
                               rxrpc_propose_ack_ping_for_lost_ack);
 
-       if (resend && call->state != RXRPC_CALL_CLIENT_RECV_REPLY)
+       if (resend && __rxrpc_call_state(call) != RXRPC_CALL_CLIENT_RECV_REPLY)
                rxrpc_resend(call, NULL);
 
        if (test_and_clear_bit(RXRPC_CALL_RX_IS_IDLE, &call->flags))
@@ -453,7 +503,7 @@ void rxrpc_input_call_event(struct rxrpc_call *call, struct sk_buff *skb)
                               rxrpc_propose_ack_input_data);
 
        /* Make sure the timer is restarted */
-       if (call->state != RXRPC_CALL_COMPLETE) {
+       if (!__rxrpc_call_is_complete(call)) {
                next = call->expect_rx_by;
 
 #define set(T) { t = READ_ONCE(T); if (time_before(t, next)) next = t; }
@@ -474,9 +524,15 @@ void rxrpc_input_call_event(struct rxrpc_call *call, struct sk_buff *skb)
        }
 
 out:
-       if (call->state == RXRPC_CALL_COMPLETE)
+       if (__rxrpc_call_is_complete(call)) {
                del_timer_sync(&call->timer);
+               if (!test_bit(RXRPC_CALL_DISCONNECTED, &call->flags))
+                       rxrpc_disconnect_call(call);
+               if (call->security)
+                       call->security->free_call_crypto(call);
+       }
        if (call->acks_hard_ack != call->tx_bottom)
                rxrpc_shrink_call_tx_buffer(call);
        _leave("");
+       return true;
 }
index 89dcf60b11587551adac4742f4f399c93d47b165..f3c9f0201c15617071db548179754354eed60335 100644 (file)
@@ -50,7 +50,7 @@ void rxrpc_poke_call(struct rxrpc_call *call, enum rxrpc_call_poke_trace what)
        struct rxrpc_local *local = call->local;
        bool busy;
 
-       if (call->state < RXRPC_CALL_COMPLETE) {
+       if (!test_bit(RXRPC_CALL_DISCONNECTED, &call->flags)) {
                spin_lock_bh(&local->lock);
                busy = !list_empty(&call->attend_link);
                trace_rxrpc_poke_call(call, busy, what);
@@ -69,7 +69,7 @@ static void rxrpc_call_timer_expired(struct timer_list *t)
 
        _enter("%d", call->debug_id);
 
-       if (call->state < RXRPC_CALL_COMPLETE) {
+       if (!__rxrpc_call_is_complete(call)) {
                trace_rxrpc_timer_expired(call, jiffies);
                rxrpc_poke_call(call, rxrpc_call_poke_timer);
        }
@@ -150,7 +150,7 @@ struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *rx, gfp_t gfp,
        timer_setup(&call->timer, rxrpc_call_timer_expired, 0);
        INIT_WORK(&call->destroyer, rxrpc_destroy_call);
        INIT_LIST_HEAD(&call->link);
-       INIT_LIST_HEAD(&call->chan_wait_link);
+       INIT_LIST_HEAD(&call->wait_link);
        INIT_LIST_HEAD(&call->accept_link);
        INIT_LIST_HEAD(&call->recvmsg_link);
        INIT_LIST_HEAD(&call->sock_link);
@@ -162,7 +162,6 @@ struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *rx, gfp_t gfp,
        init_waitqueue_head(&call->waitq);
        spin_lock_init(&call->notify_lock);
        spin_lock_init(&call->tx_lock);
-       rwlock_init(&call->state_lock);
        refcount_set(&call->ref, 1);
        call->debug_id = debug_id;
        call->tx_total_len = -1;
@@ -211,7 +210,6 @@ static struct rxrpc_call *rxrpc_alloc_client_call(struct rxrpc_sock *rx,
        now = ktime_get_real();
        call->acks_latest_ts    = now;
        call->cong_tstamp       = now;
-       call->state             = RXRPC_CALL_CLIENT_AWAIT_CONN;
        call->dest_srx          = *srx;
        call->interruptibility  = p->interruptibility;
        call->tx_total_len      = p->tx_total_len;
@@ -227,11 +225,13 @@ static struct rxrpc_call *rxrpc_alloc_client_call(struct rxrpc_sock *rx,
 
        ret = rxrpc_init_client_call_security(call);
        if (ret < 0) {
-               __rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR, 0, ret);
+               rxrpc_prefail_call(call, RXRPC_CALL_LOCAL_ERROR, ret);
                rxrpc_put_call(call, rxrpc_call_put_discard_error);
                return ERR_PTR(ret);
        }
 
+       rxrpc_set_call_state(call, RXRPC_CALL_CLIENT_AWAIT_CONN);
+
        trace_rxrpc_call(call->debug_id, refcount_read(&call->ref),
                         p->user_call_ID, rxrpc_call_new_client);
 
@@ -242,7 +242,7 @@ static struct rxrpc_call *rxrpc_alloc_client_call(struct rxrpc_sock *rx,
 /*
  * Initiate the call ack/resend/expiry timer.
  */
-static void rxrpc_start_call_timer(struct rxrpc_call *call)
+void rxrpc_start_call_timer(struct rxrpc_call *call)
 {
        unsigned long now = jiffies;
        unsigned long j = now + MAX_JIFFY_OFFSET;
@@ -286,6 +286,39 @@ static void rxrpc_put_call_slot(struct rxrpc_call *call)
        up(limiter);
 }
 
+/*
+ * Start the process of connecting a call.  We obtain a peer and a connection
+ * bundle, but the actual association of a call with a connection is offloaded
+ * to the I/O thread to simplify locking.
+ */
+static int rxrpc_connect_call(struct rxrpc_call *call, gfp_t gfp)
+{
+       struct rxrpc_local *local = call->local;
+       int ret = -ENOMEM;
+
+       _enter("{%d,%lx},", call->debug_id, call->user_call_ID);
+
+       call->peer = rxrpc_lookup_peer(local, &call->dest_srx, gfp);
+       if (!call->peer)
+               goto error;
+
+       ret = rxrpc_look_up_bundle(call, gfp);
+       if (ret < 0)
+               goto error;
+
+       trace_rxrpc_client(NULL, -1, rxrpc_client_queue_new_call);
+       rxrpc_get_call(call, rxrpc_call_get_io_thread);
+       spin_lock(&local->client_call_lock);
+       list_add_tail(&call->wait_link, &local->new_client_calls);
+       spin_unlock(&local->client_call_lock);
+       rxrpc_wake_up_io_thread(local);
+       return 0;
+
+error:
+       __set_bit(RXRPC_CALL_DISCONNECTED, &call->flags);
+       return ret;
+}
+
 /*
  * Set up a call for the given parameters.
  * - Called with the socket lock held, which it must release.
@@ -365,14 +398,10 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
        /* Set up or get a connection record and set the protocol parameters,
         * including channel number and call ID.
         */
-       ret = rxrpc_connect_call(rx, call, cp, srx, gfp);
+       ret = rxrpc_connect_call(call, gfp);
        if (ret < 0)
                goto error_attached_to_socket;
 
-       rxrpc_see_call(call, rxrpc_call_see_connected);
-
-       rxrpc_start_call_timer(call);
-
        _leave(" = %p [new]", call);
        return call;
 
@@ -384,27 +413,23 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
 error_dup_user_ID:
        write_unlock(&rx->call_lock);
        release_sock(&rx->sk);
-       __rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR,
-                                   RX_CALL_DEAD, -EEXIST);
+       rxrpc_prefail_call(call, RXRPC_CALL_LOCAL_ERROR, -EEXIST);
        trace_rxrpc_call(call->debug_id, refcount_read(&call->ref), 0,
                         rxrpc_call_see_userid_exists);
-       rxrpc_release_call(rx, call);
        mutex_unlock(&call->user_mutex);
        rxrpc_put_call(call, rxrpc_call_put_userid_exists);
        _leave(" = -EEXIST");
        return ERR_PTR(-EEXIST);
 
        /* We got an error, but the call is attached to the socket and is in
-        * need of release.  However, we might now race with recvmsg() when
-        * completing the call queues it.  Return 0 from sys_sendmsg() and
+        * need of release.  However, we might now race with recvmsg() when it
+        * completion notifies the socket.  Return 0 from sys_sendmsg() and
         * leave the error to recvmsg() to deal with.
         */
 error_attached_to_socket:
        trace_rxrpc_call(call->debug_id, refcount_read(&call->ref), ret,
                         rxrpc_call_see_connect_failed);
-       set_bit(RXRPC_CALL_DISCONNECTED, &call->flags);
-       __rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR,
-                                   RX_CALL_DEAD, ret);
+       rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR, 0, ret);
        _leave(" = c=%08x [err]", call->debug_id);
        return call;
 }
@@ -427,32 +452,32 @@ void rxrpc_incoming_call(struct rxrpc_sock *rx,
        call->call_id           = sp->hdr.callNumber;
        call->dest_srx.srx_service = sp->hdr.serviceId;
        call->cid               = sp->hdr.cid;
-       call->state             = RXRPC_CALL_SERVER_SECURING;
        call->cong_tstamp       = skb->tstamp;
 
+       __set_bit(RXRPC_CALL_EXPOSED, &call->flags);
+       rxrpc_set_call_state(call, RXRPC_CALL_SERVER_SECURING);
+
        spin_lock(&conn->state_lock);
 
        switch (conn->state) {
        case RXRPC_CONN_SERVICE_UNSECURED:
        case RXRPC_CONN_SERVICE_CHALLENGING:
-               call->state = RXRPC_CALL_SERVER_SECURING;
+               rxrpc_set_call_state(call, RXRPC_CALL_SERVER_SECURING);
                break;
        case RXRPC_CONN_SERVICE:
-               call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
+               rxrpc_set_call_state(call, RXRPC_CALL_SERVER_RECV_REQUEST);
                break;
 
-       case RXRPC_CONN_REMOTELY_ABORTED:
-               __rxrpc_set_call_completion(call, RXRPC_CALL_REMOTELY_ABORTED,
-                                           conn->abort_code, conn->error);
-               break;
-       case RXRPC_CONN_LOCALLY_ABORTED:
-               __rxrpc_abort_call("CON", call, 1,
-                                  conn->abort_code, conn->error);
+       case RXRPC_CONN_ABORTED:
+               rxrpc_set_call_completion(call, conn->completion,
+                                         conn->abort_code, conn->error);
                break;
        default:
                BUG();
        }
 
+       rxrpc_get_call(call, rxrpc_call_get_io_thread);
+
        /* Set the channel for this call.  We don't get channel_lock as we're
         * only defending against the data_ready handler (which we're called
         * from) and the RESPONSE packet parser (which is only really
@@ -462,7 +487,7 @@ void rxrpc_incoming_call(struct rxrpc_sock *rx,
        chan = sp->hdr.cid & RXRPC_CHANNELMASK;
        conn->channels[chan].call_counter = call->call_id;
        conn->channels[chan].call_id = call->call_id;
-       rcu_assign_pointer(conn->channels[chan].call, call);
+       conn->channels[chan].call = call;
        spin_unlock(&conn->state_lock);
 
        spin_lock(&conn->peer->lock);
@@ -522,20 +547,17 @@ static void rxrpc_cleanup_ring(struct rxrpc_call *call)
 void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call)
 {
        struct rxrpc_connection *conn = call->conn;
-       bool put = false;
+       bool put = false, putu = false;
 
        _enter("{%d,%d}", call->debug_id, refcount_read(&call->ref));
 
        trace_rxrpc_call(call->debug_id, refcount_read(&call->ref),
                         call->flags, rxrpc_call_see_release);
 
-       ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);
-
        if (test_and_set_bit(RXRPC_CALL_RELEASED, &call->flags))
                BUG();
 
        rxrpc_put_call_slot(call);
-       del_timer_sync(&call->timer);
 
        /* Make sure we don't get any more notifications */
        write_lock(&rx->recvmsg_lock);
@@ -560,7 +582,7 @@ void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call)
        if (test_and_clear_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
                rb_erase(&call->sock_node, &rx->calls);
                memset(&call->sock_node, 0xdd, sizeof(call->sock_node));
-               rxrpc_put_call(call, rxrpc_call_put_userid_exists);
+               putu = true;
        }
 
        list_del(&call->sock_link);
@@ -568,10 +590,9 @@ void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call)
 
        _debug("RELEASE CALL %p (%d CONN %p)", call, call->debug_id, conn);
 
-       if (conn && !test_bit(RXRPC_CALL_DISCONNECTED, &call->flags))
-               rxrpc_disconnect_call(call);
-       if (call->security)
-               call->security->free_call_crypto(call);
+       if (putu)
+               rxrpc_put_call(call, rxrpc_call_put_userid);
+
        _leave("");
 }
 
@@ -588,7 +609,8 @@ void rxrpc_release_calls_on_socket(struct rxrpc_sock *rx)
                call = list_entry(rx->to_be_accepted.next,
                                  struct rxrpc_call, accept_link);
                list_del(&call->accept_link);
-               rxrpc_abort_call("SKR", call, 0, RX_CALL_DEAD, -ECONNRESET);
+               rxrpc_propose_abort(call, RX_CALL_DEAD, -ECONNRESET,
+                                   rxrpc_abort_call_sock_release_tba);
                rxrpc_put_call(call, rxrpc_call_put_release_sock_tba);
        }
 
@@ -596,8 +618,8 @@ void rxrpc_release_calls_on_socket(struct rxrpc_sock *rx)
                call = list_entry(rx->sock_calls.next,
                                  struct rxrpc_call, sock_link);
                rxrpc_get_call(call, rxrpc_call_get_release_sock);
-               rxrpc_abort_call("SKT", call, 0, RX_CALL_DEAD, -ECONNRESET);
-               rxrpc_send_abort_packet(call);
+               rxrpc_propose_abort(call, RX_CALL_DEAD, -ECONNRESET,
+                                   rxrpc_abort_call_sock_release);
                rxrpc_release_call(rx, call);
                rxrpc_put_call(call, rxrpc_call_put_release_sock);
        }
@@ -620,7 +642,7 @@ void rxrpc_put_call(struct rxrpc_call *call, enum rxrpc_call_trace why)
        dead = __refcount_dec_and_test(&call->ref, &r);
        trace_rxrpc_call(debug_id, r - 1, 0, why);
        if (dead) {
-               ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);
+               ASSERTCMP(__rxrpc_call_state(call), ==, RXRPC_CALL_COMPLETE);
 
                if (!list_empty(&call->link)) {
                        spin_lock(&rxnet->call_lock);
@@ -669,6 +691,8 @@ static void rxrpc_destroy_call(struct work_struct *work)
 
        rxrpc_put_txbuf(call->tx_pending, rxrpc_txbuf_put_cleaned);
        rxrpc_put_connection(call->conn, rxrpc_conn_put_call);
+       rxrpc_deactivate_bundle(call->bundle);
+       rxrpc_put_bundle(call->bundle, rxrpc_bundle_put_call);
        rxrpc_put_peer(call->peer, rxrpc_peer_put_call);
        rxrpc_put_local(call->local, rxrpc_local_put_call);
        call_rcu(&call->rcu, rxrpc_rcu_free_call);
@@ -681,7 +705,7 @@ void rxrpc_cleanup_call(struct rxrpc_call *call)
 {
        memset(&call->sock_node, 0xcd, sizeof(call->sock_node));
 
-       ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);
+       ASSERTCMP(__rxrpc_call_state(call), ==, RXRPC_CALL_COMPLETE);
        ASSERT(test_bit(RXRPC_CALL_RELEASED, &call->flags));
 
        del_timer(&call->timer);
@@ -719,7 +743,7 @@ void rxrpc_destroy_all_calls(struct rxrpc_net *rxnet)
 
                        pr_err("Call %p still in use (%d,%s,%lx,%lx)!\n",
                               call, refcount_read(&call->ref),
-                              rxrpc_call_states[call->state],
+                              rxrpc_call_states[__rxrpc_call_state(call)],
                               call->flags, call->events);
 
                        spin_unlock(&rxnet->call_lock);
diff --git a/net/rxrpc/call_state.c b/net/rxrpc/call_state.c
new file mode 100644 (file)
index 0000000..6afb543
--- /dev/null
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Call state changing functions.
+ *
+ * Copyright (C) 2022 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#include "ar-internal.h"
+
+/*
+ * Transition a call to the complete state.
+ */
+bool rxrpc_set_call_completion(struct rxrpc_call *call,
+                                enum rxrpc_call_completion compl,
+                                u32 abort_code,
+                                int error)
+{
+       if (__rxrpc_call_state(call) == RXRPC_CALL_COMPLETE)
+               return false;
+
+       call->abort_code = abort_code;
+       call->error = error;
+       call->completion = compl;
+       /* Allow reader of completion state to operate locklessly */
+       rxrpc_set_call_state(call, RXRPC_CALL_COMPLETE);
+       trace_rxrpc_call_complete(call);
+       wake_up(&call->waitq);
+       rxrpc_notify_socket(call);
+       return true;
+}
+
+/*
+ * Record that a call successfully completed.
+ */
+bool rxrpc_call_completed(struct rxrpc_call *call)
+{
+       return rxrpc_set_call_completion(call, RXRPC_CALL_SUCCEEDED, 0, 0);
+}
+
+/*
+ * Record that a call is locally aborted.
+ */
+bool rxrpc_abort_call(struct rxrpc_call *call, rxrpc_seq_t seq,
+                     u32 abort_code, int error, enum rxrpc_abort_reason why)
+{
+       trace_rxrpc_abort(call->debug_id, why, call->cid, call->call_id, seq,
+                         abort_code, error);
+       if (!rxrpc_set_call_completion(call, RXRPC_CALL_LOCALLY_ABORTED,
+                                      abort_code, error))
+               return false;
+       if (test_bit(RXRPC_CALL_EXPOSED, &call->flags))
+               rxrpc_send_abort_packet(call);
+       return true;
+}
+
+/*
+ * Record that a call errored out before even getting off the ground, thereby
+ * setting the state to allow it to be destroyed.
+ */
+void rxrpc_prefail_call(struct rxrpc_call *call, enum rxrpc_call_completion compl,
+                       int error)
+{
+       call->abort_code        = RX_CALL_DEAD;
+       call->error             = error;
+       call->completion        = compl;
+       call->_state            = RXRPC_CALL_COMPLETE;
+       trace_rxrpc_call_complete(call);
+       WARN_ON_ONCE(__test_and_set_bit(RXRPC_CALL_RELEASED, &call->flags));
+}
index 87efa0373aed346b17b801e977516b19eb90ad49..981ca5b98bcb9096fa9bd6ee3561d2792a17782f 100644 (file)
@@ -34,104 +34,59 @@ __read_mostly unsigned int rxrpc_reap_client_connections = 900;
 __read_mostly unsigned long rxrpc_conn_idle_client_expiry = 2 * 60 * HZ;
 __read_mostly unsigned long rxrpc_conn_idle_client_fast_expiry = 2 * HZ;
 
-/*
- * We use machine-unique IDs for our client connections.
- */
-DEFINE_IDR(rxrpc_client_conn_ids);
-static DEFINE_SPINLOCK(rxrpc_conn_id_lock);
-
-static void rxrpc_deactivate_bundle(struct rxrpc_bundle *bundle);
-
-/*
- * Get a connection ID and epoch for a client connection from the global pool.
- * The connection struct pointer is then recorded in the idr radix tree.  The
- * epoch doesn't change until the client is rebooted (or, at least, unless the
- * module is unloaded).
- */
-static int rxrpc_get_client_connection_id(struct rxrpc_connection *conn,
-                                         gfp_t gfp)
+static void rxrpc_activate_bundle(struct rxrpc_bundle *bundle)
 {
-       struct rxrpc_net *rxnet = conn->rxnet;
-       int id;
-
-       _enter("");
-
-       idr_preload(gfp);
-       spin_lock(&rxrpc_conn_id_lock);
-
-       id = idr_alloc_cyclic(&rxrpc_client_conn_ids, conn,
-                             1, 0x40000000, GFP_NOWAIT);
-       if (id < 0)
-               goto error;
-
-       spin_unlock(&rxrpc_conn_id_lock);
-       idr_preload_end();
-
-       conn->proto.epoch = rxnet->epoch;
-       conn->proto.cid = id << RXRPC_CIDSHIFT;
-       set_bit(RXRPC_CONN_HAS_IDR, &conn->flags);
-       _leave(" [CID %x]", conn->proto.cid);
-       return 0;
-
-error:
-       spin_unlock(&rxrpc_conn_id_lock);
-       idr_preload_end();
-       _leave(" = %d", id);
-       return id;
+       atomic_inc(&bundle->active);
 }
 
 /*
- * Release a connection ID for a client connection from the global pool.
+ * Release a connection ID for a client connection.
  */
-static void rxrpc_put_client_connection_id(struct rxrpc_connection *conn)
+static void rxrpc_put_client_connection_id(struct rxrpc_local *local,
+                                          struct rxrpc_connection *conn)
 {
-       if (test_bit(RXRPC_CONN_HAS_IDR, &conn->flags)) {
-               spin_lock(&rxrpc_conn_id_lock);
-               idr_remove(&rxrpc_client_conn_ids,
-                          conn->proto.cid >> RXRPC_CIDSHIFT);
-               spin_unlock(&rxrpc_conn_id_lock);
-       }
+       idr_remove(&local->conn_ids, conn->proto.cid >> RXRPC_CIDSHIFT);
 }
 
 /*
  * Destroy the client connection ID tree.
  */
-void rxrpc_destroy_client_conn_ids(void)
+static void rxrpc_destroy_client_conn_ids(struct rxrpc_local *local)
 {
        struct rxrpc_connection *conn;
        int id;
 
-       if (!idr_is_empty(&rxrpc_client_conn_ids)) {
-               idr_for_each_entry(&rxrpc_client_conn_ids, conn, id) {
+       if (!idr_is_empty(&local->conn_ids)) {
+               idr_for_each_entry(&local->conn_ids, conn, id) {
                        pr_err("AF_RXRPC: Leaked client conn %p {%d}\n",
                               conn, refcount_read(&conn->ref));
                }
                BUG();
        }
 
-       idr_destroy(&rxrpc_client_conn_ids);
+       idr_destroy(&local->conn_ids);
 }
 
 /*
  * Allocate a connection bundle.
  */
-static struct rxrpc_bundle *rxrpc_alloc_bundle(struct rxrpc_conn_parameters *cp,
+static struct rxrpc_bundle *rxrpc_alloc_bundle(struct rxrpc_call *call,
                                               gfp_t gfp)
 {
        struct rxrpc_bundle *bundle;
 
        bundle = kzalloc(sizeof(*bundle), gfp);
        if (bundle) {
-               bundle->local           = cp->local;
-               bundle->peer            = rxrpc_get_peer(cp->peer, rxrpc_peer_get_bundle);
-               bundle->key             = cp->key;
-               bundle->exclusive       = cp->exclusive;
-               bundle->upgrade         = cp->upgrade;
-               bundle->service_id      = cp->service_id;
-               bundle->security_level  = cp->security_level;
+               bundle->local           = call->local;
+               bundle->peer            = rxrpc_get_peer(call->peer, rxrpc_peer_get_bundle);
+               bundle->key             = key_get(call->key);
+               bundle->security        = call->security;
+               bundle->exclusive       = test_bit(RXRPC_CALL_EXCLUSIVE, &call->flags);
+               bundle->upgrade         = test_bit(RXRPC_CALL_UPGRADE, &call->flags);
+               bundle->service_id      = call->dest_srx.srx_service;
+               bundle->security_level  = call->security_level;
                refcount_set(&bundle->ref, 1);
                atomic_set(&bundle->active, 1);
-               spin_lock_init(&bundle->channel_lock);
                INIT_LIST_HEAD(&bundle->waiting_calls);
                trace_rxrpc_bundle(bundle->debug_id, 1, rxrpc_bundle_new);
        }
@@ -152,84 +107,87 @@ static void rxrpc_free_bundle(struct rxrpc_bundle *bundle)
 {
        trace_rxrpc_bundle(bundle->debug_id, 1, rxrpc_bundle_free);
        rxrpc_put_peer(bundle->peer, rxrpc_peer_put_bundle);
+       key_put(bundle->key);
        kfree(bundle);
 }
 
 void rxrpc_put_bundle(struct rxrpc_bundle *bundle, enum rxrpc_bundle_trace why)
 {
-       unsigned int id = bundle->debug_id;
+       unsigned int id;
        bool dead;
        int r;
 
-       dead = __refcount_dec_and_test(&bundle->ref, &r);
-       trace_rxrpc_bundle(id, r - 1, why);
-       if (dead)
-               rxrpc_free_bundle(bundle);
+       if (bundle) {
+               id = bundle->debug_id;
+               dead = __refcount_dec_and_test(&bundle->ref, &r);
+               trace_rxrpc_bundle(id, r - 1, why);
+               if (dead)
+                       rxrpc_free_bundle(bundle);
+       }
+}
+
+/*
+ * Get rid of outstanding client connection preallocations when a local
+ * endpoint is destroyed.
+ */
+void rxrpc_purge_client_connections(struct rxrpc_local *local)
+{
+       rxrpc_destroy_client_conn_ids(local);
 }
 
 /*
  * Allocate a client connection.
  */
 static struct rxrpc_connection *
-rxrpc_alloc_client_connection(struct rxrpc_bundle *bundle, gfp_t gfp)
+rxrpc_alloc_client_connection(struct rxrpc_bundle *bundle)
 {
        struct rxrpc_connection *conn;
-       struct rxrpc_net *rxnet = bundle->local->rxnet;
-       int ret;
+       struct rxrpc_local *local = bundle->local;
+       struct rxrpc_net *rxnet = local->rxnet;
+       int id;
 
        _enter("");
 
-       conn = rxrpc_alloc_connection(rxnet, gfp);
-       if (!conn) {
-               _leave(" = -ENOMEM");
+       conn = rxrpc_alloc_connection(rxnet, GFP_ATOMIC | __GFP_NOWARN);
+       if (!conn)
                return ERR_PTR(-ENOMEM);
+
+       id = idr_alloc_cyclic(&local->conn_ids, conn, 1, 0x40000000,
+                             GFP_ATOMIC | __GFP_NOWARN);
+       if (id < 0) {
+               kfree(conn);
+               return ERR_PTR(id);
        }
 
        refcount_set(&conn->ref, 1);
-       conn->bundle            = bundle;
-       conn->local             = bundle->local;
-       conn->peer              = bundle->peer;
-       conn->key               = bundle->key;
+       conn->proto.cid         = id << RXRPC_CIDSHIFT;
+       conn->proto.epoch       = local->rxnet->epoch;
+       conn->out_clientflag    = RXRPC_CLIENT_INITIATED;
+       conn->bundle            = rxrpc_get_bundle(bundle, rxrpc_bundle_get_client_conn);
+       conn->local             = rxrpc_get_local(bundle->local, rxrpc_local_get_client_conn);
+       conn->peer              = rxrpc_get_peer(bundle->peer, rxrpc_peer_get_client_conn);
+       conn->key               = key_get(bundle->key);
+       conn->security          = bundle->security;
        conn->exclusive         = bundle->exclusive;
        conn->upgrade           = bundle->upgrade;
        conn->orig_service_id   = bundle->service_id;
        conn->security_level    = bundle->security_level;
-       conn->out_clientflag    = RXRPC_CLIENT_INITIATED;
-       conn->state             = RXRPC_CONN_CLIENT;
+       conn->state             = RXRPC_CONN_CLIENT_UNSECURED;
        conn->service_id        = conn->orig_service_id;
 
-       ret = rxrpc_get_client_connection_id(conn, gfp);
-       if (ret < 0)
-               goto error_0;
-
-       ret = rxrpc_init_client_conn_security(conn);
-       if (ret < 0)
-               goto error_1;
+       if (conn->security == &rxrpc_no_security)
+               conn->state     = RXRPC_CONN_CLIENT;
 
        atomic_inc(&rxnet->nr_conns);
        write_lock(&rxnet->conn_lock);
        list_add_tail(&conn->proc_link, &rxnet->conn_proc_list);
        write_unlock(&rxnet->conn_lock);
 
-       rxrpc_get_bundle(bundle, rxrpc_bundle_get_client_conn);
-       rxrpc_get_peer(conn->peer, rxrpc_peer_get_client_conn);
-       rxrpc_get_local(conn->local, rxrpc_local_get_client_conn);
-       key_get(conn->key);
-
-       trace_rxrpc_conn(conn->debug_id, refcount_read(&conn->ref),
-                        rxrpc_conn_new_client);
+       rxrpc_see_connection(conn, rxrpc_conn_new_client);
 
        atomic_inc(&rxnet->nr_client_conns);
        trace_rxrpc_client(conn, -1, rxrpc_client_alloc);
-       _leave(" = %p", conn);
        return conn;
-
-error_1:
-       rxrpc_put_client_connection_id(conn);
-error_0:
-       kfree(conn);
-       _leave(" = %d", ret);
-       return ERR_PTR(ret);
 }
 
 /*
@@ -247,7 +205,8 @@ static bool rxrpc_may_reuse_conn(struct rxrpc_connection *conn)
        if (test_bit(RXRPC_CONN_DONT_REUSE, &conn->flags))
                goto dont_reuse;
 
-       if (conn->state != RXRPC_CONN_CLIENT ||
+       if ((conn->state != RXRPC_CONN_CLIENT_UNSECURED &&
+            conn->state != RXRPC_CONN_CLIENT) ||
            conn->proto.epoch != rxnet->epoch)
                goto mark_dont_reuse;
 
@@ -257,7 +216,7 @@ static bool rxrpc_may_reuse_conn(struct rxrpc_connection *conn)
         * times the maximum number of client conns away from the current
         * allocation point to try and keep the IDs concentrated.
         */
-       id_cursor = idr_get_cursor(&rxrpc_client_conn_ids);
+       id_cursor = idr_get_cursor(&conn->local->conn_ids);
        id = conn->proto.cid >> RXRPC_CIDSHIFT;
        distance = id - id_cursor;
        if (distance < 0)
@@ -278,20 +237,23 @@ dont_reuse:
  * Look up the conn bundle that matches the connection parameters, adding it if
  * it doesn't yet exist.
  */
-static struct rxrpc_bundle *rxrpc_look_up_bundle(struct rxrpc_conn_parameters *cp,
-                                                gfp_t gfp)
+int rxrpc_look_up_bundle(struct rxrpc_call *call, gfp_t gfp)
 {
        static atomic_t rxrpc_bundle_id;
        struct rxrpc_bundle *bundle, *candidate;
-       struct rxrpc_local *local = cp->local;
+       struct rxrpc_local *local = call->local;
        struct rb_node *p, **pp, *parent;
        long diff;
+       bool upgrade = test_bit(RXRPC_CALL_UPGRADE, &call->flags);
 
        _enter("{%px,%x,%u,%u}",
-              cp->peer, key_serial(cp->key), cp->security_level, cp->upgrade);
+              call->peer, key_serial(call->key), call->security_level,
+              upgrade);
 
-       if (cp->exclusive)
-               return rxrpc_alloc_bundle(cp, gfp);
+       if (test_bit(RXRPC_CALL_EXCLUSIVE, &call->flags)) {
+               call->bundle = rxrpc_alloc_bundle(call, gfp);
+               return call->bundle ? 0 : -ENOMEM;
+       }
 
        /* First, see if the bundle is already there. */
        _debug("search 1");
@@ -300,11 +262,11 @@ static struct rxrpc_bundle *rxrpc_look_up_bundle(struct rxrpc_conn_parameters *c
        while (p) {
                bundle = rb_entry(p, struct rxrpc_bundle, local_node);
 
-#define cmp(X) ((long)bundle->X - (long)cp->X)
-               diff = (cmp(peer) ?:
-                       cmp(key) ?:
-                       cmp(security_level) ?:
-                       cmp(upgrade));
+#define cmp(X, Y) ((long)(X) - (long)(Y))
+               diff = (cmp(bundle->peer, call->peer) ?:
+                       cmp(bundle->key, call->key) ?:
+                       cmp(bundle->security_level, call->security_level) ?:
+                       cmp(bundle->upgrade, upgrade));
 #undef cmp
                if (diff < 0)
                        p = p->rb_left;
@@ -317,9 +279,9 @@ static struct rxrpc_bundle *rxrpc_look_up_bundle(struct rxrpc_conn_parameters *c
        _debug("not found");
 
        /* It wasn't.  We need to add one. */
-       candidate = rxrpc_alloc_bundle(cp, gfp);
+       candidate = rxrpc_alloc_bundle(call, gfp);
        if (!candidate)
-               return NULL;
+               return -ENOMEM;
 
        _debug("search 2");
        spin_lock(&local->client_bundles_lock);
@@ -329,11 +291,11 @@ static struct rxrpc_bundle *rxrpc_look_up_bundle(struct rxrpc_conn_parameters *c
                parent = *pp;
                bundle = rb_entry(parent, struct rxrpc_bundle, local_node);
 
-#define cmp(X) ((long)bundle->X - (long)cp->X)
-               diff = (cmp(peer) ?:
-                       cmp(key) ?:
-                       cmp(security_level) ?:
-                       cmp(upgrade));
+#define cmp(X, Y) ((long)(X) - (long)(Y))
+               diff = (cmp(bundle->peer, call->peer) ?:
+                       cmp(bundle->key, call->key) ?:
+                       cmp(bundle->security_level, call->security_level) ?:
+                       cmp(bundle->upgrade, upgrade));
 #undef cmp
                if (diff < 0)
                        pp = &(*pp)->rb_left;
@@ -347,178 +309,89 @@ static struct rxrpc_bundle *rxrpc_look_up_bundle(struct rxrpc_conn_parameters *c
        candidate->debug_id = atomic_inc_return(&rxrpc_bundle_id);
        rb_link_node(&candidate->local_node, parent, pp);
        rb_insert_color(&candidate->local_node, &local->client_bundles);
-       rxrpc_get_bundle(candidate, rxrpc_bundle_get_client_call);
+       call->bundle = rxrpc_get_bundle(candidate, rxrpc_bundle_get_client_call);
        spin_unlock(&local->client_bundles_lock);
-       _leave(" = %u [new]", candidate->debug_id);
-       return candidate;
+       _leave(" = B=%u [new]", call->bundle->debug_id);
+       return 0;
 
 found_bundle_free:
        rxrpc_free_bundle(candidate);
 found_bundle:
-       rxrpc_get_bundle(bundle, rxrpc_bundle_get_client_call);
-       atomic_inc(&bundle->active);
+       call->bundle = rxrpc_get_bundle(bundle, rxrpc_bundle_get_client_call);
+       rxrpc_activate_bundle(bundle);
        spin_unlock(&local->client_bundles_lock);
-       _leave(" = %u [found]", bundle->debug_id);
-       return bundle;
-}
-
-/*
- * Create or find a client bundle to use for a call.
- *
- * If we return with a connection, the call will be on its waiting list.  It's
- * left to the caller to assign a channel and wake up the call.
- */
-static struct rxrpc_bundle *rxrpc_prep_call(struct rxrpc_sock *rx,
-                                           struct rxrpc_call *call,
-                                           struct rxrpc_conn_parameters *cp,
-                                           struct sockaddr_rxrpc *srx,
-                                           gfp_t gfp)
-{
-       struct rxrpc_bundle *bundle;
-
-       _enter("{%d,%lx},", call->debug_id, call->user_call_ID);
-
-       cp->peer = rxrpc_lookup_peer(rx, cp->local, srx, gfp);
-       if (!cp->peer)
-               goto error;
-
-       call->tx_last_sent = ktime_get_real();
-       call->cong_ssthresh = cp->peer->cong_ssthresh;
-       if (call->cong_cwnd >= call->cong_ssthresh)
-               call->cong_mode = RXRPC_CALL_CONGEST_AVOIDANCE;
-       else
-               call->cong_mode = RXRPC_CALL_SLOW_START;
-       if (cp->upgrade)
-               __set_bit(RXRPC_CALL_UPGRADE, &call->flags);
-
-       /* Find the client connection bundle. */
-       bundle = rxrpc_look_up_bundle(cp, gfp);
-       if (!bundle)
-               goto error;
-
-       /* Get this call queued.  Someone else may activate it whilst we're
-        * lining up a new connection, but that's fine.
-        */
-       spin_lock(&bundle->channel_lock);
-       list_add_tail(&call->chan_wait_link, &bundle->waiting_calls);
-       spin_unlock(&bundle->channel_lock);
-
-       _leave(" = [B=%x]", bundle->debug_id);
-       return bundle;
-
-error:
-       _leave(" = -ENOMEM");
-       return ERR_PTR(-ENOMEM);
+       _leave(" = B=%u [found]", call->bundle->debug_id);
+       return 0;
 }
 
 /*
  * Allocate a new connection and add it into a bundle.
  */
-static void rxrpc_add_conn_to_bundle(struct rxrpc_bundle *bundle, gfp_t gfp)
-       __releases(bundle->channel_lock)
+static bool rxrpc_add_conn_to_bundle(struct rxrpc_bundle *bundle,
+                                    unsigned int slot)
 {
-       struct rxrpc_connection *candidate = NULL, *old = NULL;
-       bool conflict;
-       int i;
-
-       _enter("");
-
-       conflict = bundle->alloc_conn;
-       if (!conflict)
-               bundle->alloc_conn = true;
-       spin_unlock(&bundle->channel_lock);
-       if (conflict) {
-               _leave(" [conf]");
-               return;
-       }
-
-       candidate = rxrpc_alloc_client_connection(bundle, gfp);
-
-       spin_lock(&bundle->channel_lock);
-       bundle->alloc_conn = false;
-
-       if (IS_ERR(candidate)) {
-               bundle->alloc_error = PTR_ERR(candidate);
-               spin_unlock(&bundle->channel_lock);
-               _leave(" [err %ld]", PTR_ERR(candidate));
-               return;
-       }
-
-       bundle->alloc_error = 0;
-
-       for (i = 0; i < ARRAY_SIZE(bundle->conns); i++) {
-               unsigned int shift = i * RXRPC_MAXCALLS;
-               int j;
-
-               old = bundle->conns[i];
-               if (!rxrpc_may_reuse_conn(old)) {
-                       if (old)
-                               trace_rxrpc_client(old, -1, rxrpc_client_replace);
-                       candidate->bundle_shift = shift;
-                       atomic_inc(&bundle->active);
-                       bundle->conns[i] = candidate;
-                       for (j = 0; j < RXRPC_MAXCALLS; j++)
-                               set_bit(shift + j, &bundle->avail_chans);
-                       candidate = NULL;
-                       break;
-               }
+       struct rxrpc_connection *conn, *old;
+       unsigned int shift = slot * RXRPC_MAXCALLS;
+       unsigned int i;
 
-               old = NULL;
+       old = bundle->conns[slot];
+       if (old) {
+               bundle->conns[slot] = NULL;
+               trace_rxrpc_client(old, -1, rxrpc_client_replace);
+               rxrpc_put_connection(old, rxrpc_conn_put_noreuse);
        }
 
-       spin_unlock(&bundle->channel_lock);
-
-       if (candidate) {
-               _debug("discard C=%x", candidate->debug_id);
-               trace_rxrpc_client(candidate, -1, rxrpc_client_duplicate);
-               rxrpc_put_connection(candidate, rxrpc_conn_put_discard);
+       conn = rxrpc_alloc_client_connection(bundle);
+       if (IS_ERR(conn)) {
+               bundle->alloc_error = PTR_ERR(conn);
+               return false;
        }
 
-       rxrpc_put_connection(old, rxrpc_conn_put_noreuse);
-       _leave("");
+       rxrpc_activate_bundle(bundle);
+       conn->bundle_shift = shift;
+       bundle->conns[slot] = conn;
+       for (i = 0; i < RXRPC_MAXCALLS; i++)
+               set_bit(shift + i, &bundle->avail_chans);
+       return true;
 }
 
 /*
  * Add a connection to a bundle if there are no usable connections or we have
  * connections waiting for extra capacity.
  */
-static void rxrpc_maybe_add_conn(struct rxrpc_bundle *bundle, gfp_t gfp)
+static bool rxrpc_bundle_has_space(struct rxrpc_bundle *bundle)
 {
-       struct rxrpc_call *call;
-       int i, usable;
+       int slot = -1, i, usable;
 
        _enter("");
 
-       spin_lock(&bundle->channel_lock);
+       bundle->alloc_error = 0;
 
        /* See if there are any usable connections. */
        usable = 0;
-       for (i = 0; i < ARRAY_SIZE(bundle->conns); i++)
+       for (i = 0; i < ARRAY_SIZE(bundle->conns); i++) {
                if (rxrpc_may_reuse_conn(bundle->conns[i]))
                        usable++;
-
-       if (!usable && !list_empty(&bundle->waiting_calls)) {
-               call = list_first_entry(&bundle->waiting_calls,
-                                       struct rxrpc_call, chan_wait_link);
-               if (test_bit(RXRPC_CALL_UPGRADE, &call->flags))
-                       bundle->try_upgrade = true;
+               else if (slot == -1)
+                       slot = i;
        }
 
+       if (!usable && bundle->upgrade)
+               bundle->try_upgrade = true;
+
        if (!usable)
                goto alloc_conn;
 
        if (!bundle->avail_chans &&
            !bundle->try_upgrade &&
-           !list_empty(&bundle->waiting_calls) &&
            usable < ARRAY_SIZE(bundle->conns))
                goto alloc_conn;
 
-       spin_unlock(&bundle->channel_lock);
        _leave("");
-       return;
+       return usable;
 
 alloc_conn:
-       return rxrpc_add_conn_to_bundle(bundle, gfp);
+       return slot >= 0 ? rxrpc_add_conn_to_bundle(bundle, slot) : false;
 }
 
 /*
@@ -532,11 +405,13 @@ static void rxrpc_activate_one_channel(struct rxrpc_connection *conn,
        struct rxrpc_channel *chan = &conn->channels[channel];
        struct rxrpc_bundle *bundle = conn->bundle;
        struct rxrpc_call *call = list_entry(bundle->waiting_calls.next,
-                                            struct rxrpc_call, chan_wait_link);
+                                            struct rxrpc_call, wait_link);
        u32 call_id = chan->call_counter + 1;
 
        _enter("C=%x,%u", conn->debug_id, channel);
 
+       list_del_init(&call->wait_link);
+
        trace_rxrpc_client(conn, channel, rxrpc_client_chan_activate);
 
        /* Cancel the final ACK on the previous call if it hasn't been sent yet
@@ -546,68 +421,50 @@ static void rxrpc_activate_one_channel(struct rxrpc_connection *conn,
        clear_bit(conn->bundle_shift + channel, &bundle->avail_chans);
 
        rxrpc_see_call(call, rxrpc_call_see_activate_client);
-       list_del_init(&call->chan_wait_link);
-       call->peer      = rxrpc_get_peer(conn->peer, rxrpc_peer_get_activate_call);
        call->conn      = rxrpc_get_connection(conn, rxrpc_conn_get_activate_call);
        call->cid       = conn->proto.cid | channel;
        call->call_id   = call_id;
        call->dest_srx.srx_service = conn->service_id;
-
-       trace_rxrpc_connect_call(call);
-
-       write_lock(&call->state_lock);
-       call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
-       write_unlock(&call->state_lock);
-
-       /* Paired with the read barrier in rxrpc_connect_call().  This orders
-        * cid and epoch in the connection wrt to call_id without the need to
-        * take the channel_lock.
-        *
-        * We provisionally assign a callNumber at this point, but we don't
-        * confirm it until the call is about to be exposed.
-        *
-        * TODO: Pair with a barrier in the data_ready handler when that looks
-        * at the call ID through a connection channel.
-        */
-       smp_wmb();
+       call->cong_ssthresh = call->peer->cong_ssthresh;
+       if (call->cong_cwnd >= call->cong_ssthresh)
+               call->cong_mode = RXRPC_CALL_CONGEST_AVOIDANCE;
+       else
+               call->cong_mode = RXRPC_CALL_SLOW_START;
 
        chan->call_id           = call_id;
        chan->call_debug_id     = call->debug_id;
-       rcu_assign_pointer(chan->call, call);
+       chan->call              = call;
+
+       rxrpc_see_call(call, rxrpc_call_see_connected);
+       trace_rxrpc_connect_call(call);
+       call->tx_last_sent = ktime_get_real();
+       rxrpc_start_call_timer(call);
+       rxrpc_set_call_state(call, RXRPC_CALL_CLIENT_SEND_REQUEST);
        wake_up(&call->waitq);
 }
 
 /*
  * Remove a connection from the idle list if it's on it.
  */
-static void rxrpc_unidle_conn(struct rxrpc_bundle *bundle, struct rxrpc_connection *conn)
+static void rxrpc_unidle_conn(struct rxrpc_connection *conn)
 {
-       struct rxrpc_net *rxnet = bundle->local->rxnet;
-       bool drop_ref;
-
        if (!list_empty(&conn->cache_link)) {
-               drop_ref = false;
-               spin_lock(&rxnet->client_conn_cache_lock);
-               if (!list_empty(&conn->cache_link)) {
-                       list_del_init(&conn->cache_link);
-                       drop_ref = true;
-               }
-               spin_unlock(&rxnet->client_conn_cache_lock);
-               if (drop_ref)
-                       rxrpc_put_connection(conn, rxrpc_conn_put_unidle);
+               list_del_init(&conn->cache_link);
+               rxrpc_put_connection(conn, rxrpc_conn_put_unidle);
        }
 }
 
 /*
- * Assign channels and callNumbers to waiting calls with channel_lock
- * held by caller.
+ * Assign channels and callNumbers to waiting calls.
  */
-static void rxrpc_activate_channels_locked(struct rxrpc_bundle *bundle)
+static void rxrpc_activate_channels(struct rxrpc_bundle *bundle)
 {
        struct rxrpc_connection *conn;
        unsigned long avail, mask;
        unsigned int channel, slot;
 
+       trace_rxrpc_client(NULL, -1, rxrpc_client_activate_chans);
+
        if (bundle->try_upgrade)
                mask = 1;
        else
@@ -627,7 +484,7 @@ static void rxrpc_activate_channels_locked(struct rxrpc_bundle *bundle)
 
                if (bundle->try_upgrade)
                        set_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags);
-               rxrpc_unidle_conn(bundle, conn);
+               rxrpc_unidle_conn(conn);
 
                channel &= (RXRPC_MAXCALLS - 1);
                conn->act_chans |= 1 << channel;
@@ -636,132 +493,24 @@ static void rxrpc_activate_channels_locked(struct rxrpc_bundle *bundle)
 }
 
 /*
- * Assign channels and callNumbers to waiting calls.
- */
-static void rxrpc_activate_channels(struct rxrpc_bundle *bundle)
-{
-       _enter("B=%x", bundle->debug_id);
-
-       trace_rxrpc_client(NULL, -1, rxrpc_client_activate_chans);
-
-       if (!bundle->avail_chans)
-               return;
-
-       spin_lock(&bundle->channel_lock);
-       rxrpc_activate_channels_locked(bundle);
-       spin_unlock(&bundle->channel_lock);
-       _leave("");
-}
-
-/*
- * Wait for a callNumber and a channel to be granted to a call.
- */
-static int rxrpc_wait_for_channel(struct rxrpc_bundle *bundle,
-                                 struct rxrpc_call *call, gfp_t gfp)
-{
-       DECLARE_WAITQUEUE(myself, current);
-       int ret = 0;
-
-       _enter("%d", call->debug_id);
-
-       if (!gfpflags_allow_blocking(gfp)) {
-               rxrpc_maybe_add_conn(bundle, gfp);
-               rxrpc_activate_channels(bundle);
-               ret = bundle->alloc_error ?: -EAGAIN;
-               goto out;
-       }
-
-       add_wait_queue_exclusive(&call->waitq, &myself);
-       for (;;) {
-               rxrpc_maybe_add_conn(bundle, gfp);
-               rxrpc_activate_channels(bundle);
-               ret = bundle->alloc_error;
-               if (ret < 0)
-                       break;
-
-               switch (call->interruptibility) {
-               case RXRPC_INTERRUPTIBLE:
-               case RXRPC_PREINTERRUPTIBLE:
-                       set_current_state(TASK_INTERRUPTIBLE);
-                       break;
-               case RXRPC_UNINTERRUPTIBLE:
-               default:
-                       set_current_state(TASK_UNINTERRUPTIBLE);
-                       break;
-               }
-               if (READ_ONCE(call->state) != RXRPC_CALL_CLIENT_AWAIT_CONN)
-                       break;
-               if ((call->interruptibility == RXRPC_INTERRUPTIBLE ||
-                    call->interruptibility == RXRPC_PREINTERRUPTIBLE) &&
-                   signal_pending(current)) {
-                       ret = -ERESTARTSYS;
-                       break;
-               }
-               schedule();
-       }
-       remove_wait_queue(&call->waitq, &myself);
-       __set_current_state(TASK_RUNNING);
-
-out:
-       _leave(" = %d", ret);
-       return ret;
-}
-
-/*
- * find a connection for a call
- * - called in process context with IRQs enabled
+ * Connect waiting channels (called from the I/O thread).
  */
-int rxrpc_connect_call(struct rxrpc_sock *rx,
-                      struct rxrpc_call *call,
-                      struct rxrpc_conn_parameters *cp,
-                      struct sockaddr_rxrpc *srx,
-                      gfp_t gfp)
+void rxrpc_connect_client_calls(struct rxrpc_local *local)
 {
-       struct rxrpc_bundle *bundle;
-       struct rxrpc_net *rxnet = cp->local->rxnet;
-       int ret = 0;
-
-       _enter("{%d,%lx},", call->debug_id, call->user_call_ID);
-
-       rxrpc_discard_expired_client_conns(&rxnet->client_conn_reaper);
+       struct rxrpc_call *call;
 
-       bundle = rxrpc_prep_call(rx, call, cp, srx, gfp);
-       if (IS_ERR(bundle)) {
-               ret = PTR_ERR(bundle);
-               goto out;
-       }
+       while ((call = list_first_entry_or_null(&local->new_client_calls,
+                                               struct rxrpc_call, wait_link))
+              ) {
+               struct rxrpc_bundle *bundle = call->bundle;
 
-       if (call->state == RXRPC_CALL_CLIENT_AWAIT_CONN) {
-               ret = rxrpc_wait_for_channel(bundle, call, gfp);
-               if (ret < 0)
-                       goto wait_failed;
-       }
+               spin_lock(&local->client_call_lock);
+               list_move_tail(&call->wait_link, &bundle->waiting_calls);
+               spin_unlock(&local->client_call_lock);
 
-granted_channel:
-       /* Paired with the write barrier in rxrpc_activate_one_channel(). */
-       smp_rmb();
-
-out_put_bundle:
-       rxrpc_deactivate_bundle(bundle);
-       rxrpc_put_bundle(bundle, rxrpc_bundle_get_client_call);
-out:
-       _leave(" = %d", ret);
-       return ret;
-
-wait_failed:
-       spin_lock(&bundle->channel_lock);
-       list_del_init(&call->chan_wait_link);
-       spin_unlock(&bundle->channel_lock);
-
-       if (call->state != RXRPC_CALL_CLIENT_AWAIT_CONN) {
-               ret = 0;
-               goto granted_channel;
+               if (rxrpc_bundle_has_space(bundle))
+                       rxrpc_activate_channels(bundle);
        }
-
-       trace_rxrpc_client(call->conn, ret, rxrpc_client_chan_wait_failed);
-       rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR, 0, ret);
-       rxrpc_disconnect_client_call(bundle, call);
-       goto out_put_bundle;
 }
 
 /*
@@ -794,14 +543,14 @@ void rxrpc_expose_client_call(struct rxrpc_call *call)
 /*
  * Set the reap timer.
  */
-static void rxrpc_set_client_reap_timer(struct rxrpc_net *rxnet)
+static void rxrpc_set_client_reap_timer(struct rxrpc_local *local)
 {
-       if (!rxnet->kill_all_client_conns) {
+       if (!local->kill_all_client_conns) {
                unsigned long now = jiffies;
                unsigned long reap_at = now + rxrpc_conn_idle_client_expiry;
 
-               if (rxnet->live)
-                       timer_reduce(&rxnet->client_conn_reap_timer, reap_at);
+               if (local->rxnet->live)
+                       timer_reduce(&local->client_conn_reap_timer, reap_at);
        }
 }
 
@@ -812,16 +561,13 @@ void rxrpc_disconnect_client_call(struct rxrpc_bundle *bundle, struct rxrpc_call
 {
        struct rxrpc_connection *conn;
        struct rxrpc_channel *chan = NULL;
-       struct rxrpc_net *rxnet = bundle->local->rxnet;
+       struct rxrpc_local *local = bundle->local;
        unsigned int channel;
        bool may_reuse;
        u32 cid;
 
        _enter("c=%x", call->debug_id);
 
-       spin_lock(&bundle->channel_lock);
-       set_bit(RXRPC_CALL_DISCONNECTED, &call->flags);
-
        /* Calls that have never actually been assigned a channel can simply be
         * discarded.
         */
@@ -830,8 +576,8 @@ void rxrpc_disconnect_client_call(struct rxrpc_bundle *bundle, struct rxrpc_call
                _debug("call is waiting");
                ASSERTCMP(call->call_id, ==, 0);
                ASSERT(!test_bit(RXRPC_CALL_EXPOSED, &call->flags));
-               list_del_init(&call->chan_wait_link);
-               goto out;
+               list_del_init(&call->wait_link);
+               return;
        }
 
        cid = call->cid;
@@ -839,10 +585,8 @@ void rxrpc_disconnect_client_call(struct rxrpc_bundle *bundle, struct rxrpc_call
        chan = &conn->channels[channel];
        trace_rxrpc_client(conn, channel, rxrpc_client_chan_disconnect);
 
-       if (rcu_access_pointer(chan->call) != call) {
-               spin_unlock(&bundle->channel_lock);
-               BUG();
-       }
+       if (WARN_ON(chan->call != call))
+               return;
 
        may_reuse = rxrpc_may_reuse_conn(conn);
 
@@ -863,16 +607,15 @@ void rxrpc_disconnect_client_call(struct rxrpc_bundle *bundle, struct rxrpc_call
                        trace_rxrpc_client(conn, channel, rxrpc_client_to_active);
                        bundle->try_upgrade = false;
                        if (may_reuse)
-                               rxrpc_activate_channels_locked(bundle);
+                               rxrpc_activate_channels(bundle);
                }
-
        }
 
        /* See if we can pass the channel directly to another call. */
        if (may_reuse && !list_empty(&bundle->waiting_calls)) {
                trace_rxrpc_client(conn, channel, rxrpc_client_chan_pass);
                rxrpc_activate_one_channel(conn, channel);
-               goto out;
+               return;
        }
 
        /* Schedule the final ACK to be transmitted in a short while so that it
@@ -890,7 +633,7 @@ void rxrpc_disconnect_client_call(struct rxrpc_bundle *bundle, struct rxrpc_call
        }
 
        /* Deactivate the channel. */
-       rcu_assign_pointer(chan->call, NULL);
+       chan->call = NULL;
        set_bit(conn->bundle_shift + channel, &conn->bundle->avail_chans);
        conn->act_chans &= ~(1 << channel);
 
@@ -903,17 +646,10 @@ void rxrpc_disconnect_client_call(struct rxrpc_bundle *bundle, struct rxrpc_call
                conn->idle_timestamp = jiffies;
 
                rxrpc_get_connection(conn, rxrpc_conn_get_idle);
-               spin_lock(&rxnet->client_conn_cache_lock);
-               list_move_tail(&conn->cache_link, &rxnet->idle_client_conns);
-               spin_unlock(&rxnet->client_conn_cache_lock);
+               list_move_tail(&conn->cache_link, &local->idle_client_conns);
 
-               rxrpc_set_client_reap_timer(rxnet);
+               rxrpc_set_client_reap_timer(local);
        }
-
-out:
-       spin_unlock(&bundle->channel_lock);
-       _leave("");
-       return;
 }
 
 /*
@@ -923,7 +659,6 @@ static void rxrpc_unbundle_conn(struct rxrpc_connection *conn)
 {
        struct rxrpc_bundle *bundle = conn->bundle;
        unsigned int bindex;
-       bool need_drop = false;
        int i;
 
        _enter("C=%x", conn->debug_id);
@@ -931,18 +666,13 @@ static void rxrpc_unbundle_conn(struct rxrpc_connection *conn)
        if (conn->flags & RXRPC_CONN_FINAL_ACK_MASK)
                rxrpc_process_delayed_final_acks(conn, true);
 
-       spin_lock(&bundle->channel_lock);
        bindex = conn->bundle_shift / RXRPC_MAXCALLS;
        if (bundle->conns[bindex] == conn) {
                _debug("clear slot %u", bindex);
                bundle->conns[bindex] = NULL;
                for (i = 0; i < RXRPC_MAXCALLS; i++)
                        clear_bit(conn->bundle_shift + i, &bundle->avail_chans);
-               need_drop = true;
-       }
-       spin_unlock(&bundle->channel_lock);
-
-       if (need_drop) {
+               rxrpc_put_client_connection_id(bundle->local, conn);
                rxrpc_deactivate_bundle(bundle);
                rxrpc_put_connection(conn, rxrpc_conn_put_unbundle);
        }
@@ -951,11 +681,15 @@ static void rxrpc_unbundle_conn(struct rxrpc_connection *conn)
 /*
  * Drop the active count on a bundle.
  */
-static void rxrpc_deactivate_bundle(struct rxrpc_bundle *bundle)
+void rxrpc_deactivate_bundle(struct rxrpc_bundle *bundle)
 {
-       struct rxrpc_local *local = bundle->local;
+       struct rxrpc_local *local;
        bool need_put = false;
 
+       if (!bundle)
+               return;
+
+       local = bundle->local;
        if (atomic_dec_and_lock(&bundle->active, &local->client_bundles_lock)) {
                if (!bundle->exclusive) {
                        _debug("erase bundle");
@@ -982,7 +716,7 @@ void rxrpc_kill_client_conn(struct rxrpc_connection *conn)
        trace_rxrpc_client(conn, -1, rxrpc_client_cleanup);
        atomic_dec(&rxnet->nr_client_conns);
 
-       rxrpc_put_client_connection_id(conn);
+       rxrpc_put_client_connection_id(local, conn);
 }
 
 /*
@@ -992,42 +726,26 @@ void rxrpc_kill_client_conn(struct rxrpc_connection *conn)
  * This may be called from conn setup or from a work item so cannot be
  * considered non-reentrant.
  */
-void rxrpc_discard_expired_client_conns(struct work_struct *work)
+void rxrpc_discard_expired_client_conns(struct rxrpc_local *local)
 {
        struct rxrpc_connection *conn;
-       struct rxrpc_net *rxnet =
-               container_of(work, struct rxrpc_net, client_conn_reaper);
        unsigned long expiry, conn_expires_at, now;
        unsigned int nr_conns;
 
        _enter("");
 
-       if (list_empty(&rxnet->idle_client_conns)) {
-               _leave(" [empty]");
-               return;
-       }
-
-       /* Don't double up on the discarding */
-       if (!mutex_trylock(&rxnet->client_conn_discard_lock)) {
-               _leave(" [already]");
-               return;
-       }
-
        /* We keep an estimate of what the number of conns ought to be after
         * we've discarded some so that we don't overdo the discarding.
         */
-       nr_conns = atomic_read(&rxnet->nr_client_conns);
+       nr_conns = atomic_read(&local->rxnet->nr_client_conns);
 
 next:
-       spin_lock(&rxnet->client_conn_cache_lock);
-
-       if (list_empty(&rxnet->idle_client_conns))
-               goto out;
-
-       conn = list_entry(rxnet->idle_client_conns.next,
-                         struct rxrpc_connection, cache_link);
+       conn = list_first_entry_or_null(&local->idle_client_conns,
+                                       struct rxrpc_connection, cache_link);
+       if (!conn)
+               return;
 
-       if (!rxnet->kill_all_client_conns) {
+       if (!local->kill_all_client_conns) {
                /* If the number of connections is over the reap limit, we
                 * expedite discard by reducing the expiry timeout.  We must,
                 * however, have at least a short grace period to be able to do
@@ -1050,8 +768,6 @@ next:
        trace_rxrpc_client(conn, -1, rxrpc_client_discard);
        list_del_init(&conn->cache_link);
 
-       spin_unlock(&rxnet->client_conn_cache_lock);
-
        rxrpc_unbundle_conn(conn);
        /* Drop the ->cache_link ref */
        rxrpc_put_connection(conn, rxrpc_conn_put_discard_idle);
@@ -1068,31 +784,8 @@ not_yet_expired:
         * then things get messier.
         */
        _debug("not yet");
-       if (!rxnet->kill_all_client_conns)
-               timer_reduce(&rxnet->client_conn_reap_timer, conn_expires_at);
-
-out:
-       spin_unlock(&rxnet->client_conn_cache_lock);
-       mutex_unlock(&rxnet->client_conn_discard_lock);
-       _leave("");
-}
-
-/*
- * Preemptively destroy all the client connection records rather than waiting
- * for them to time out
- */
-void rxrpc_destroy_all_client_connections(struct rxrpc_net *rxnet)
-{
-       _enter("");
-
-       spin_lock(&rxnet->client_conn_cache_lock);
-       rxnet->kill_all_client_conns = true;
-       spin_unlock(&rxnet->client_conn_cache_lock);
-
-       del_timer_sync(&rxnet->client_conn_reap_timer);
-
-       if (!rxrpc_queue_work(&rxnet->client_conn_reaper))
-               _debug("destroy: queue failed");
+       if (!local->kill_all_client_conns)
+               timer_reduce(&local->client_conn_reap_timer, conn_expires_at);
 
        _leave("");
 }
@@ -1102,29 +795,19 @@ void rxrpc_destroy_all_client_connections(struct rxrpc_net *rxnet)
  */
 void rxrpc_clean_up_local_conns(struct rxrpc_local *local)
 {
-       struct rxrpc_connection *conn, *tmp;
-       struct rxrpc_net *rxnet = local->rxnet;
-       LIST_HEAD(graveyard);
+       struct rxrpc_connection *conn;
 
        _enter("");
 
-       spin_lock(&rxnet->client_conn_cache_lock);
-
-       list_for_each_entry_safe(conn, tmp, &rxnet->idle_client_conns,
-                                cache_link) {
-               if (conn->local == local) {
-                       atomic_dec(&conn->active);
-                       trace_rxrpc_client(conn, -1, rxrpc_client_discard);
-                       list_move(&conn->cache_link, &graveyard);
-               }
-       }
+       local->kill_all_client_conns = true;
 
-       spin_unlock(&rxnet->client_conn_cache_lock);
+       del_timer_sync(&local->client_conn_reap_timer);
 
-       while (!list_empty(&graveyard)) {
-               conn = list_entry(graveyard.next,
-                                 struct rxrpc_connection, cache_link);
+       while ((conn = list_first_entry_or_null(&local->idle_client_conns,
+                                               struct rxrpc_connection, cache_link))) {
                list_del_init(&conn->cache_link);
+               atomic_dec(&conn->active);
+               trace_rxrpc_client(conn, -1, rxrpc_client_discard);
                rxrpc_unbundle_conn(conn);
                rxrpc_put_connection(conn, rxrpc_conn_put_local_dead);
        }
index 480364bcbf855755a0f6c43ab627c7e14c8071f8..44414e724415e907b05a65d1c51d322573148ba9 100644 (file)
 #include <net/ip.h>
 #include "ar-internal.h"
 
+/*
+ * Set the completion state on an aborted connection.
+ */
+static bool rxrpc_set_conn_aborted(struct rxrpc_connection *conn, struct sk_buff *skb,
+                                  s32 abort_code, int err,
+                                  enum rxrpc_call_completion compl)
+{
+       bool aborted = false;
+
+       if (conn->state != RXRPC_CONN_ABORTED) {
+               spin_lock(&conn->state_lock);
+               if (conn->state != RXRPC_CONN_ABORTED) {
+                       conn->abort_code = abort_code;
+                       conn->error      = err;
+                       conn->completion = compl;
+                       /* Order the abort info before the state change. */
+                       smp_store_release(&conn->state, RXRPC_CONN_ABORTED);
+                       set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags);
+                       set_bit(RXRPC_CONN_EV_ABORT_CALLS, &conn->events);
+                       aborted = true;
+               }
+               spin_unlock(&conn->state_lock);
+       }
+
+       return aborted;
+}
+
+/*
+ * Mark a socket buffer to indicate that the connection it's on should be aborted.
+ */
+int rxrpc_abort_conn(struct rxrpc_connection *conn, struct sk_buff *skb,
+                    s32 abort_code, int err, enum rxrpc_abort_reason why)
+{
+       struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+
+       if (rxrpc_set_conn_aborted(conn, skb, abort_code, err,
+                                  RXRPC_CALL_LOCALLY_ABORTED)) {
+               trace_rxrpc_abort(0, why, sp->hdr.cid, sp->hdr.callNumber,
+                                 sp->hdr.seq, abort_code, err);
+               rxrpc_poke_conn(conn, rxrpc_conn_get_poke_abort);
+       }
+       return -EPROTO;
+}
+
+/*
+ * Mark a connection as being remotely aborted.
+ */
+static bool rxrpc_input_conn_abort(struct rxrpc_connection *conn,
+                                  struct sk_buff *skb)
+{
+       return rxrpc_set_conn_aborted(conn, skb, skb->priority, -ECONNABORTED,
+                                     RXRPC_CALL_REMOTELY_ABORTED);
+}
+
 /*
  * Retransmit terminal ACK or ABORT of the previous call.
  */
-static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
-                                      struct sk_buff *skb,
-                                      unsigned int channel)
+void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
+                               struct sk_buff *skb,
+                               unsigned int channel)
 {
        struct rxrpc_skb_priv *sp = skb ? rxrpc_skb(skb) : NULL;
        struct rxrpc_channel *chan;
@@ -46,9 +100,7 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
        /* If the last call got moved on whilst we were waiting to run, just
         * ignore this packet.
         */
-       call_id = READ_ONCE(chan->last_call);
-       /* Sync with __rxrpc_disconnect_call() */
-       smp_rmb();
+       call_id = chan->last_call;
        if (skb && call_id != sp->hdr.callNumber)
                return;
 
@@ -65,9 +117,12 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
        iov[2].iov_base = &ack_info;
        iov[2].iov_len  = sizeof(ack_info);
 
+       serial = atomic_inc_return(&conn->serial);
+
        pkt.whdr.epoch          = htonl(conn->proto.epoch);
        pkt.whdr.cid            = htonl(conn->proto.cid | channel);
        pkt.whdr.callNumber     = htonl(call_id);
+       pkt.whdr.serial         = htonl(serial);
        pkt.whdr.seq            = 0;
        pkt.whdr.type           = chan->last_type;
        pkt.whdr.flags          = conn->out_clientflag;
@@ -104,31 +159,15 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
                iov[0].iov_len += sizeof(pkt.ack);
                len += sizeof(pkt.ack) + 3 + sizeof(ack_info);
                ioc = 3;
-               break;
-
-       default:
-               return;
-       }
-
-       /* Resync with __rxrpc_disconnect_call() and check that the last call
-        * didn't get advanced whilst we were filling out the packets.
-        */
-       smp_rmb();
-       if (READ_ONCE(chan->last_call) != call_id)
-               return;
-
-       serial = atomic_inc_return(&conn->serial);
-       pkt.whdr.serial = htonl(serial);
 
-       switch (chan->last_type) {
-       case RXRPC_PACKET_TYPE_ABORT:
-               break;
-       case RXRPC_PACKET_TYPE_ACK:
                trace_rxrpc_tx_ack(chan->call_debug_id, serial,
                                   ntohl(pkt.ack.firstPacket),
                                   ntohl(pkt.ack.serial),
                                   pkt.ack.reason, 0);
                break;
+
+       default:
+               return;
        }
 
        ret = kernel_sendmsg(conn->local->socket, &msg, iov, ioc, len);
@@ -146,131 +185,34 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
 /*
  * pass a connection-level abort onto all calls on that connection
  */
-static void rxrpc_abort_calls(struct rxrpc_connection *conn,
-                             enum rxrpc_call_completion compl,
-                             rxrpc_serial_t serial)
+static void rxrpc_abort_calls(struct rxrpc_connection *conn)
 {
        struct rxrpc_call *call;
        int i;
 
        _enter("{%d},%x", conn->debug_id, conn->abort_code);
 
-       spin_lock(&conn->bundle->channel_lock);
-
        for (i = 0; i < RXRPC_MAXCALLS; i++) {
-               call = rcu_dereference_protected(
-                       conn->channels[i].call,
-                       lockdep_is_held(&conn->bundle->channel_lock));
-               if (call) {
-                       if (compl == RXRPC_CALL_LOCALLY_ABORTED)
-                               trace_rxrpc_abort(call->debug_id,
-                                                 "CON", call->cid,
-                                                 call->call_id, 0,
+               call = conn->channels[i].call;
+               if (call)
+                       rxrpc_set_call_completion(call,
+                                                 conn->completion,
                                                  conn->abort_code,
                                                  conn->error);
-                       else
-                               trace_rxrpc_rx_abort(call, serial,
-                                                    conn->abort_code);
-                       rxrpc_set_call_completion(call, compl,
-                                                 conn->abort_code,
-                                                 conn->error);
-               }
        }
 
-       spin_unlock(&conn->bundle->channel_lock);
        _leave("");
 }
 
-/*
- * generate a connection-level abort
- */
-static int rxrpc_abort_connection(struct rxrpc_connection *conn,
-                                 int error, u32 abort_code)
-{
-       struct rxrpc_wire_header whdr;
-       struct msghdr msg;
-       struct kvec iov[2];
-       __be32 word;
-       size_t len;
-       u32 serial;
-       int ret;
-
-       _enter("%d,,%u,%u", conn->debug_id, error, abort_code);
-
-       /* generate a connection-level abort */
-       spin_lock(&conn->state_lock);
-       if (conn->state >= RXRPC_CONN_REMOTELY_ABORTED) {
-               spin_unlock(&conn->state_lock);
-               _leave(" = 0 [already dead]");
-               return 0;
-       }
-
-       conn->error = error;
-       conn->abort_code = abort_code;
-       conn->state = RXRPC_CONN_LOCALLY_ABORTED;
-       set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags);
-       spin_unlock(&conn->state_lock);
-
-       msg.msg_name    = &conn->peer->srx.transport;
-       msg.msg_namelen = conn->peer->srx.transport_len;
-       msg.msg_control = NULL;
-       msg.msg_controllen = 0;
-       msg.msg_flags   = 0;
-
-       whdr.epoch      = htonl(conn->proto.epoch);
-       whdr.cid        = htonl(conn->proto.cid);
-       whdr.callNumber = 0;
-       whdr.seq        = 0;
-       whdr.type       = RXRPC_PACKET_TYPE_ABORT;
-       whdr.flags      = conn->out_clientflag;
-       whdr.userStatus = 0;
-       whdr.securityIndex = conn->security_ix;
-       whdr._rsvd      = 0;
-       whdr.serviceId  = htons(conn->service_id);
-
-       word            = htonl(conn->abort_code);
-
-       iov[0].iov_base = &whdr;
-       iov[0].iov_len  = sizeof(whdr);
-       iov[1].iov_base = &word;
-       iov[1].iov_len  = sizeof(word);
-
-       len = iov[0].iov_len + iov[1].iov_len;
-
-       serial = atomic_inc_return(&conn->serial);
-       rxrpc_abort_calls(conn, RXRPC_CALL_LOCALLY_ABORTED, serial);
-       whdr.serial = htonl(serial);
-
-       ret = kernel_sendmsg(conn->local->socket, &msg, iov, 2, len);
-       if (ret < 0) {
-               trace_rxrpc_tx_fail(conn->debug_id, serial, ret,
-                                   rxrpc_tx_point_conn_abort);
-               _debug("sendmsg failed: %d", ret);
-               return -EAGAIN;
-       }
-
-       trace_rxrpc_tx_packet(conn->debug_id, &whdr, rxrpc_tx_point_conn_abort);
-
-       conn->peer->last_tx_at = ktime_get_seconds();
-
-       _leave(" = 0");
-       return 0;
-}
-
 /*
  * mark a call as being on a now-secured channel
  * - must be called with BH's disabled.
  */
 static void rxrpc_call_is_secure(struct rxrpc_call *call)
 {
-       _enter("%p", call);
-       if (call) {
-               write_lock(&call->state_lock);
-               if (call->state == RXRPC_CALL_SERVER_SECURING) {
-                       call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
-                       rxrpc_notify_socket(call);
-               }
-               write_unlock(&call->state_lock);
+       if (call && __rxrpc_call_state(call) == RXRPC_CALL_SERVER_SECURING) {
+               rxrpc_set_call_state(call, RXRPC_CALL_SERVER_RECV_REQUEST);
+               rxrpc_notify_socket(call);
        }
 }
 
@@ -278,44 +220,22 @@ static void rxrpc_call_is_secure(struct rxrpc_call *call)
  * connection-level Rx packet processor
  */
 static int rxrpc_process_event(struct rxrpc_connection *conn,
-                              struct sk_buff *skb,
-                              u32 *_abort_code)
+                              struct sk_buff *skb)
 {
        struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
-       int loop, ret;
+       int ret;
 
-       if (conn->state >= RXRPC_CONN_REMOTELY_ABORTED) {
-               _leave(" = -ECONNABORTED [%u]", conn->state);
+       if (conn->state == RXRPC_CONN_ABORTED)
                return -ECONNABORTED;
-       }
 
        _enter("{%d},{%u,%%%u},", conn->debug_id, sp->hdr.type, sp->hdr.serial);
 
        switch (sp->hdr.type) {
-       case RXRPC_PACKET_TYPE_DATA:
-       case RXRPC_PACKET_TYPE_ACK:
-               rxrpc_conn_retransmit_call(conn, skb,
-                                          sp->hdr.cid & RXRPC_CHANNELMASK);
-               return 0;
-
-       case RXRPC_PACKET_TYPE_BUSY:
-               /* Just ignore BUSY packets for now. */
-               return 0;
-
-       case RXRPC_PACKET_TYPE_ABORT:
-               conn->error = -ECONNABORTED;
-               conn->abort_code = skb->priority;
-               conn->state = RXRPC_CONN_REMOTELY_ABORTED;
-               set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags);
-               rxrpc_abort_calls(conn, RXRPC_CALL_REMOTELY_ABORTED, sp->hdr.serial);
-               return -ECONNABORTED;
-
        case RXRPC_PACKET_TYPE_CHALLENGE:
-               return conn->security->respond_to_challenge(conn, skb,
-                                                           _abort_code);
+               return conn->security->respond_to_challenge(conn, skb);
 
        case RXRPC_PACKET_TYPE_RESPONSE:
-               ret = conn->security->verify_response(conn, skb, _abort_code);
+               ret = conn->security->verify_response(conn, skb);
                if (ret < 0)
                        return ret;
 
@@ -324,27 +244,25 @@ static int rxrpc_process_event(struct rxrpc_connection *conn,
                if (ret < 0)
                        return ret;
 
-               spin_lock(&conn->bundle->channel_lock);
                spin_lock(&conn->state_lock);
-
-               if (conn->state == RXRPC_CONN_SERVICE_CHALLENGING) {
+               if (conn->state == RXRPC_CONN_SERVICE_CHALLENGING)
                        conn->state = RXRPC_CONN_SERVICE;
-                       spin_unlock(&conn->state_lock);
-                       for (loop = 0; loop < RXRPC_MAXCALLS; loop++)
-                               rxrpc_call_is_secure(
-                                       rcu_dereference_protected(
-                                               conn->channels[loop].call,
-                                               lockdep_is_held(&conn->bundle->channel_lock)));
-               } else {
-                       spin_unlock(&conn->state_lock);
-               }
+               spin_unlock(&conn->state_lock);
 
-               spin_unlock(&conn->bundle->channel_lock);
+               if (conn->state == RXRPC_CONN_SERVICE) {
+                       /* Offload call state flipping to the I/O thread.  As
+                        * we've already received the packet, put it on the
+                        * front of the queue.
+                        */
+                       skb->mark = RXRPC_SKB_MARK_SERVICE_CONN_SECURED;
+                       rxrpc_get_skb(skb, rxrpc_skb_get_conn_secured);
+                       skb_queue_head(&conn->local->rx_queue, skb);
+                       rxrpc_wake_up_io_thread(conn->local);
+               }
                return 0;
 
        default:
-               trace_rxrpc_rx_eproto(NULL, sp->hdr.serial,
-                                     tracepoint_string("bad_conn_pkt"));
+               WARN_ON_ONCE(1);
                return -EPROTO;
        }
 }
@@ -354,26 +272,9 @@ static int rxrpc_process_event(struct rxrpc_connection *conn,
  */
 static void rxrpc_secure_connection(struct rxrpc_connection *conn)
 {
-       u32 abort_code;
-       int ret;
-
-       _enter("{%d}", conn->debug_id);
-
-       ASSERT(conn->security_ix != 0);
-
-       if (conn->security->issue_challenge(conn) < 0) {
-               abort_code = RX_CALL_DEAD;
-               ret = -ENOMEM;
-               goto abort;
-       }
-
-       _leave("");
-       return;
-
-abort:
-       _debug("abort %d, %d", ret, abort_code);
-       rxrpc_abort_connection(conn, ret, abort_code);
-       _leave(" [aborted]");
+       if (conn->security->issue_challenge(conn) < 0)
+               rxrpc_abort_conn(conn, NULL, RX_CALL_DEAD, -ENOMEM,
+                                rxrpc_abort_nomem);
 }
 
 /*
@@ -395,9 +296,7 @@ again:
                if (!test_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags))
                        continue;
 
-               smp_rmb(); /* vs rxrpc_disconnect_client_call */
-               ack_at = READ_ONCE(chan->final_ack_at);
-
+               ack_at = chan->final_ack_at;
                if (time_before(j, ack_at) && !force) {
                        if (time_before(ack_at, next_j)) {
                                next_j = ack_at;
@@ -424,47 +323,27 @@ again:
 static void rxrpc_do_process_connection(struct rxrpc_connection *conn)
 {
        struct sk_buff *skb;
-       u32 abort_code = RX_PROTOCOL_ERROR;
        int ret;
 
        if (test_and_clear_bit(RXRPC_CONN_EV_CHALLENGE, &conn->events))
                rxrpc_secure_connection(conn);
 
-       /* Process delayed ACKs whose time has come. */
-       if (conn->flags & RXRPC_CONN_FINAL_ACK_MASK)
-               rxrpc_process_delayed_final_acks(conn, false);
-
        /* go through the conn-level event packets, releasing the ref on this
         * connection that each one has when we've finished with it */
        while ((skb = skb_dequeue(&conn->rx_queue))) {
                rxrpc_see_skb(skb, rxrpc_skb_see_conn_work);
-               ret = rxrpc_process_event(conn, skb, &abort_code);
+               ret = rxrpc_process_event(conn, skb);
                switch (ret) {
-               case -EPROTO:
-               case -EKEYEXPIRED:
-               case -EKEYREJECTED:
-                       goto protocol_error;
                case -ENOMEM:
                case -EAGAIN:
-                       goto requeue_and_leave;
-               case -ECONNABORTED:
+                       skb_queue_head(&conn->rx_queue, skb);
+                       rxrpc_queue_conn(conn, rxrpc_conn_queue_retry_work);
+                       break;
                default:
                        rxrpc_free_skb(skb, rxrpc_skb_put_conn_work);
                        break;
                }
        }
-
-       return;
-
-requeue_and_leave:
-       skb_queue_head(&conn->rx_queue, skb);
-       return;
-
-protocol_error:
-       if (rxrpc_abort_connection(conn, ret, abort_code) < 0)
-               goto requeue_and_leave;
-       rxrpc_free_skb(skb, rxrpc_skb_put_conn_work);
-       return;
 }
 
 void rxrpc_process_connection(struct work_struct *work)
@@ -498,44 +377,59 @@ static void rxrpc_post_packet_to_conn(struct rxrpc_connection *conn,
 /*
  * Input a connection-level packet.
  */
-int rxrpc_input_conn_packet(struct rxrpc_connection *conn, struct sk_buff *skb)
+bool rxrpc_input_conn_packet(struct rxrpc_connection *conn, struct sk_buff *skb)
 {
        struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
 
-       if (conn->state >= RXRPC_CONN_REMOTELY_ABORTED) {
-               _leave(" = -ECONNABORTED [%u]", conn->state);
-               return -ECONNABORTED;
-       }
-
-       _enter("{%d},{%u,%%%u},", conn->debug_id, sp->hdr.type, sp->hdr.serial);
-
        switch (sp->hdr.type) {
-       case RXRPC_PACKET_TYPE_DATA:
-       case RXRPC_PACKET_TYPE_ACK:
-               rxrpc_conn_retransmit_call(conn, skb,
-                                          sp->hdr.cid & RXRPC_CHANNELMASK);
-               return 0;
-
        case RXRPC_PACKET_TYPE_BUSY:
                /* Just ignore BUSY packets for now. */
-               return 0;
+               return true;
 
        case RXRPC_PACKET_TYPE_ABORT:
-               conn->error = -ECONNABORTED;
-               conn->abort_code = skb->priority;
-               conn->state = RXRPC_CONN_REMOTELY_ABORTED;
-               set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags);
-               rxrpc_abort_calls(conn, RXRPC_CALL_REMOTELY_ABORTED, sp->hdr.serial);
-               return -ECONNABORTED;
+               if (rxrpc_is_conn_aborted(conn))
+                       return true;
+               rxrpc_input_conn_abort(conn, skb);
+               rxrpc_abort_calls(conn);
+               return true;
 
        case RXRPC_PACKET_TYPE_CHALLENGE:
        case RXRPC_PACKET_TYPE_RESPONSE:
+               if (rxrpc_is_conn_aborted(conn)) {
+                       if (conn->completion == RXRPC_CALL_LOCALLY_ABORTED)
+                               rxrpc_send_conn_abort(conn);
+                       return true;
+               }
                rxrpc_post_packet_to_conn(conn, skb);
-               return 0;
+               return true;
 
        default:
-               trace_rxrpc_rx_eproto(NULL, sp->hdr.serial,
-                                     tracepoint_string("bad_conn_pkt"));
-               return -EPROTO;
+               WARN_ON_ONCE(1);
+               return true;
        }
 }
+
+/*
+ * Input a connection event.
+ */
+void rxrpc_input_conn_event(struct rxrpc_connection *conn, struct sk_buff *skb)
+{
+       unsigned int loop;
+
+       if (test_and_clear_bit(RXRPC_CONN_EV_ABORT_CALLS, &conn->events))
+               rxrpc_abort_calls(conn);
+
+       switch (skb->mark) {
+       case RXRPC_SKB_MARK_SERVICE_CONN_SECURED:
+               if (conn->state != RXRPC_CONN_SERVICE)
+                       break;
+
+               for (loop = 0; loop < RXRPC_MAXCALLS; loop++)
+                       rxrpc_call_is_secure(conn->channels[loop].call);
+               break;
+       }
+
+       /* Process delayed ACKs whose time has come. */
+       if (conn->flags & RXRPC_CONN_FINAL_ACK_MASK)
+               rxrpc_process_delayed_final_acks(conn, false);
+}
index 3c8f83dacb2b35cbe698ae1f4c89a0c1cf5ea266..ac85d4644a3c3a047066129e69c40a626557f1c7 100644 (file)
@@ -23,12 +23,30 @@ static void rxrpc_clean_up_connection(struct work_struct *work);
 static void rxrpc_set_service_reap_timer(struct rxrpc_net *rxnet,
                                         unsigned long reap_at);
 
+void rxrpc_poke_conn(struct rxrpc_connection *conn, enum rxrpc_conn_trace why)
+{
+       struct rxrpc_local *local = conn->local;
+       bool busy;
+
+       if (WARN_ON_ONCE(!local))
+               return;
+
+       spin_lock_bh(&local->lock);
+       busy = !list_empty(&conn->attend_link);
+       if (!busy) {
+               rxrpc_get_connection(conn, why);
+               list_add_tail(&conn->attend_link, &local->conn_attend_q);
+       }
+       spin_unlock_bh(&local->lock);
+       rxrpc_wake_up_io_thread(local);
+}
+
 static void rxrpc_connection_timer(struct timer_list *timer)
 {
        struct rxrpc_connection *conn =
                container_of(timer, struct rxrpc_connection, timer);
 
-       rxrpc_queue_conn(conn, rxrpc_conn_queue_timer);
+       rxrpc_poke_conn(conn, rxrpc_conn_get_poke_timer);
 }
 
 /*
@@ -49,6 +67,7 @@ struct rxrpc_connection *rxrpc_alloc_connection(struct rxrpc_net *rxnet,
                INIT_WORK(&conn->destructor, rxrpc_clean_up_connection);
                INIT_LIST_HEAD(&conn->proc_link);
                INIT_LIST_HEAD(&conn->link);
+               mutex_init(&conn->security_lock);
                skb_queue_head_init(&conn->rx_queue);
                conn->rxnet = rxnet;
                conn->security = &rxrpc_no_security;
@@ -82,10 +101,10 @@ struct rxrpc_connection *rxrpc_find_client_connection_rcu(struct rxrpc_local *lo
 
        _enter(",%x", sp->hdr.cid & RXRPC_CIDMASK);
 
-       /* Look up client connections by connection ID alone as their IDs are
-        * unique for this machine.
+       /* Look up client connections by connection ID alone as their
+        * IDs are unique for this machine.
         */
-       conn = idr_find(&rxrpc_client_conn_ids, sp->hdr.cid >> RXRPC_CIDSHIFT);
+       conn = idr_find(&local->conn_ids, sp->hdr.cid >> RXRPC_CIDSHIFT);
        if (!conn || refcount_read(&conn->ref) == 0) {
                _debug("no conn");
                goto not_found;
@@ -139,7 +158,7 @@ void __rxrpc_disconnect_call(struct rxrpc_connection *conn,
 
        _enter("%d,%x", conn->debug_id, call->cid);
 
-       if (rcu_access_pointer(chan->call) == call) {
+       if (chan->call == call) {
                /* Save the result of the call so that we can repeat it if necessary
                 * through the channel, whilst disposing of the actual call record.
                 */
@@ -159,12 +178,9 @@ void __rxrpc_disconnect_call(struct rxrpc_connection *conn,
                        break;
                }
 
-               /* Sync with rxrpc_conn_retransmit(). */
-               smp_wmb();
                chan->last_call = chan->call_id;
                chan->call_id = chan->call_counter;
-
-               rcu_assign_pointer(chan->call, NULL);
+               chan->call = NULL;
        }
 
        _leave("");
@@ -178,6 +194,9 @@ void rxrpc_disconnect_call(struct rxrpc_call *call)
 {
        struct rxrpc_connection *conn = call->conn;
 
+       set_bit(RXRPC_CALL_DISCONNECTED, &call->flags);
+       rxrpc_see_call(call, rxrpc_call_see_disconnected);
+
        call->peer->cong_ssthresh = call->cong_ssthresh;
 
        if (!hlist_unhashed(&call->error_link)) {
@@ -186,18 +205,17 @@ void rxrpc_disconnect_call(struct rxrpc_call *call)
                spin_unlock(&call->peer->lock);
        }
 
-       if (rxrpc_is_client_call(call))
-               return rxrpc_disconnect_client_call(conn->bundle, call);
-
-       spin_lock(&conn->bundle->channel_lock);
-       __rxrpc_disconnect_call(conn, call);
-       spin_unlock(&conn->bundle->channel_lock);
+       if (rxrpc_is_client_call(call)) {
+               rxrpc_disconnect_client_call(call->bundle, call);
+       } else {
+               __rxrpc_disconnect_call(conn, call);
+               conn->idle_timestamp = jiffies;
+               if (atomic_dec_and_test(&conn->active))
+                       rxrpc_set_service_reap_timer(conn->rxnet,
+                                                    jiffies + rxrpc_connection_expiry);
+       }
 
-       set_bit(RXRPC_CALL_DISCONNECTED, &call->flags);
-       conn->idle_timestamp = jiffies;
-       if (atomic_dec_and_test(&conn->active))
-               rxrpc_set_service_reap_timer(conn->rxnet,
-                                            jiffies + rxrpc_connection_expiry);
+       rxrpc_put_call(call, rxrpc_call_put_io_thread);
 }
 
 /*
@@ -293,10 +311,10 @@ static void rxrpc_clean_up_connection(struct work_struct *work)
                container_of(work, struct rxrpc_connection, destructor);
        struct rxrpc_net *rxnet = conn->rxnet;
 
-       ASSERT(!rcu_access_pointer(conn->channels[0].call) &&
-              !rcu_access_pointer(conn->channels[1].call) &&
-              !rcu_access_pointer(conn->channels[2].call) &&
-              !rcu_access_pointer(conn->channels[3].call));
+       ASSERT(!conn->channels[0].call &&
+              !conn->channels[1].call &&
+              !conn->channels[2].call &&
+              !conn->channels[3].call);
        ASSERT(list_empty(&conn->cache_link));
 
        del_timer_sync(&conn->timer);
@@ -447,7 +465,6 @@ void rxrpc_destroy_all_connections(struct rxrpc_net *rxnet)
        _enter("");
 
        atomic_dec(&rxnet->nr_conns);
-       rxrpc_destroy_all_client_connections(rxnet);
 
        del_timer_sync(&rxnet->service_conn_reap_timer);
        rxrpc_queue_work(&rxnet->service_conn_reaper);
index 2a55a88b2a5b7e5f07204395e39b96130324659c..f30323de82bd11873e7bef81399d3ca269df3fce 100644 (file)
@@ -11,7 +11,6 @@
 static struct rxrpc_bundle rxrpc_service_dummy_bundle = {
        .ref            = REFCOUNT_INIT(1),
        .debug_id       = UINT_MAX,
-       .channel_lock   = __SPIN_LOCK_UNLOCKED(&rxrpc_service_dummy_bundle.channel_lock),
 };
 
 /*
index d0e20e946e48d56af203cb42c3547723870bf842..367927a998815f474ed64d2fdb4327ca8ef4e41e 100644 (file)
@@ -9,11 +9,10 @@
 
 #include "ar-internal.h"
 
-static void rxrpc_proto_abort(const char *why,
-                             struct rxrpc_call *call, rxrpc_seq_t seq)
+static void rxrpc_proto_abort(struct rxrpc_call *call, rxrpc_seq_t seq,
+                             enum rxrpc_abort_reason why)
 {
-       if (rxrpc_abort_call(why, call, seq, RX_PROTOCOL_ERROR, -EBADMSG))
-               rxrpc_send_abort_packet(call);
+       rxrpc_abort_call(call, seq, RX_PROTOCOL_ERROR, -EBADMSG, why);
 }
 
 /*
@@ -185,7 +184,7 @@ void rxrpc_congestion_degrade(struct rxrpc_call *call)
        if (call->cong_mode != RXRPC_CALL_SLOW_START &&
            call->cong_mode != RXRPC_CALL_CONGEST_AVOIDANCE)
                return;
-       if (call->state == RXRPC_CALL_CLIENT_AWAIT_REPLY)
+       if (__rxrpc_call_state(call) == RXRPC_CALL_CLIENT_AWAIT_REPLY)
                return;
 
        rtt = ns_to_ktime(call->peer->srtt_us * (1000 / 8));
@@ -250,47 +249,34 @@ static bool rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to,
  * This occurs when we get an ACKALL packet, the first DATA packet of a reply,
  * or a final ACK packet.
  */
-static bool rxrpc_end_tx_phase(struct rxrpc_call *call, bool reply_begun,
-                              const char *abort_why)
+static void rxrpc_end_tx_phase(struct rxrpc_call *call, bool reply_begun,
+                              enum rxrpc_abort_reason abort_why)
 {
-       unsigned int state;
-
        ASSERT(test_bit(RXRPC_CALL_TX_LAST, &call->flags));
 
-       write_lock(&call->state_lock);
-
-       state = call->state;
-       switch (state) {
+       switch (__rxrpc_call_state(call)) {
        case RXRPC_CALL_CLIENT_SEND_REQUEST:
        case RXRPC_CALL_CLIENT_AWAIT_REPLY:
-               if (reply_begun)
-                       call->state = state = RXRPC_CALL_CLIENT_RECV_REPLY;
-               else
-                       call->state = state = RXRPC_CALL_CLIENT_AWAIT_REPLY;
+               if (reply_begun) {
+                       rxrpc_set_call_state(call, RXRPC_CALL_CLIENT_RECV_REPLY);
+                       trace_rxrpc_txqueue(call, rxrpc_txqueue_end);
+                       break;
+               }
+
+               rxrpc_set_call_state(call, RXRPC_CALL_CLIENT_AWAIT_REPLY);
+               trace_rxrpc_txqueue(call, rxrpc_txqueue_await_reply);
                break;
 
        case RXRPC_CALL_SERVER_AWAIT_ACK:
-               __rxrpc_call_completed(call);
-               state = call->state;
+               rxrpc_call_completed(call);
+               trace_rxrpc_txqueue(call, rxrpc_txqueue_end);
                break;
 
        default:
-               goto bad_state;
+               kdebug("end_tx %s", rxrpc_call_states[__rxrpc_call_state(call)]);
+               rxrpc_proto_abort(call, call->tx_top, abort_why);
+               break;
        }
-
-       write_unlock(&call->state_lock);
-       if (state == RXRPC_CALL_CLIENT_AWAIT_REPLY)
-               trace_rxrpc_txqueue(call, rxrpc_txqueue_await_reply);
-       else
-               trace_rxrpc_txqueue(call, rxrpc_txqueue_end);
-       _leave(" = ok");
-       return true;
-
-bad_state:
-       write_unlock(&call->state_lock);
-       kdebug("end_tx %s", rxrpc_call_states[call->state]);
-       rxrpc_proto_abort(abort_why, call, call->tx_top);
-       return false;
 }
 
 /*
@@ -305,18 +291,48 @@ static bool rxrpc_receiving_reply(struct rxrpc_call *call)
        if (call->ackr_reason) {
                now = jiffies;
                timo = now + MAX_JIFFY_OFFSET;
-               WRITE_ONCE(call->resend_at, timo);
+
                WRITE_ONCE(call->delay_ack_at, timo);
                trace_rxrpc_timer(call, rxrpc_timer_init_for_reply, now);
        }
 
        if (!test_bit(RXRPC_CALL_TX_LAST, &call->flags)) {
                if (!rxrpc_rotate_tx_window(call, top, &summary)) {
-                       rxrpc_proto_abort("TXL", call, top);
+                       rxrpc_proto_abort(call, top, rxrpc_eproto_early_reply);
                        return false;
                }
        }
-       return rxrpc_end_tx_phase(call, true, "ETD");
+
+       rxrpc_end_tx_phase(call, true, rxrpc_eproto_unexpected_reply);
+       return true;
+}
+
+/*
+ * End the packet reception phase.
+ */
+static void rxrpc_end_rx_phase(struct rxrpc_call *call, rxrpc_serial_t serial)
+{
+       rxrpc_seq_t whigh = READ_ONCE(call->rx_highest_seq);
+
+       _enter("%d,%s", call->debug_id, rxrpc_call_states[__rxrpc_call_state(call)]);
+
+       trace_rxrpc_receive(call, rxrpc_receive_end, 0, whigh);
+
+       switch (__rxrpc_call_state(call)) {
+       case RXRPC_CALL_CLIENT_RECV_REPLY:
+               rxrpc_propose_delay_ACK(call, serial, rxrpc_propose_ack_terminal_ack);
+               rxrpc_call_completed(call);
+               break;
+
+       case RXRPC_CALL_SERVER_RECV_REQUEST:
+               rxrpc_set_call_state(call, RXRPC_CALL_SERVER_ACK_REQUEST);
+               call->expect_req_by = jiffies + MAX_JIFFY_OFFSET;
+               rxrpc_propose_delay_ACK(call, serial, rxrpc_propose_ack_processing_op);
+               break;
+
+       default:
+               break;
+       }
 }
 
 static void rxrpc_input_update_ack_window(struct rxrpc_call *call,
@@ -337,8 +353,9 @@ static void rxrpc_input_queue_data(struct rxrpc_call *call, struct sk_buff *skb,
 
        __skb_queue_tail(&call->recvmsg_queue, skb);
        rxrpc_input_update_ack_window(call, window, wtop);
-
        trace_rxrpc_receive(call, last ? why + 1 : why, sp->hdr.serial, sp->hdr.seq);
+       if (last)
+               rxrpc_end_rx_phase(call, sp->hdr.serial);
 }
 
 /*
@@ -366,17 +383,14 @@ static void rxrpc_input_data_one(struct rxrpc_call *call, struct sk_buff *skb,
 
        if (last) {
                if (test_and_set_bit(RXRPC_CALL_RX_LAST, &call->flags) &&
-                   seq + 1 != wtop) {
-                       rxrpc_proto_abort("LSN", call, seq);
-                       return;
-               }
+                   seq + 1 != wtop)
+                       return rxrpc_proto_abort(call, seq, rxrpc_eproto_different_last);
        } else {
                if (test_bit(RXRPC_CALL_RX_LAST, &call->flags) &&
                    after_eq(seq, wtop)) {
                        pr_warn("Packet beyond last: c=%x q=%x window=%x-%x wlimit=%x\n",
                                call->debug_id, seq, window, wtop, wlimit);
-                       rxrpc_proto_abort("LSA", call, seq);
-                       return;
+                       return rxrpc_proto_abort(call, seq, rxrpc_eproto_data_after_last);
                }
        }
 
@@ -550,7 +564,6 @@ protocol_error:
 static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb)
 {
        struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
-       enum rxrpc_call_state state;
        rxrpc_serial_t serial = sp->hdr.serial;
        rxrpc_seq_t seq0 = sp->hdr.seq;
 
@@ -558,11 +571,20 @@ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb)
               atomic64_read(&call->ackr_window), call->rx_highest_seq,
               skb->len, seq0);
 
-       state = READ_ONCE(call->state);
-       if (state >= RXRPC_CALL_COMPLETE)
+       if (__rxrpc_call_is_complete(call))
                return;
 
-       if (state == RXRPC_CALL_SERVER_RECV_REQUEST) {
+       switch (__rxrpc_call_state(call)) {
+       case RXRPC_CALL_CLIENT_SEND_REQUEST:
+       case RXRPC_CALL_CLIENT_AWAIT_REPLY:
+               /* Received data implicitly ACKs all of the request
+                * packets we sent when we're acting as a client.
+                */
+               if (!rxrpc_receiving_reply(call))
+                       goto out_notify;
+               break;
+
+       case RXRPC_CALL_SERVER_RECV_REQUEST: {
                unsigned long timo = READ_ONCE(call->next_req_timo);
                unsigned long now, expect_req_by;
 
@@ -573,18 +595,15 @@ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb)
                        rxrpc_reduce_call_timer(call, expect_req_by, now,
                                                rxrpc_timer_set_for_idle);
                }
+               break;
        }
 
-       /* Received data implicitly ACKs all of the request packets we sent
-        * when we're acting as a client.
-        */
-       if ((state == RXRPC_CALL_CLIENT_SEND_REQUEST ||
-            state == RXRPC_CALL_CLIENT_AWAIT_REPLY) &&
-           !rxrpc_receiving_reply(call))
-               goto out_notify;
+       default:
+               break;
+       }
 
        if (!rxrpc_input_split_jumbo(call, skb)) {
-               rxrpc_proto_abort("VLD", call, sp->hdr.seq);
+               rxrpc_proto_abort(call, sp->hdr.seq, rxrpc_badmsg_bad_jumbo);
                goto out_notify;
        }
        skb = NULL;
@@ -765,7 +784,7 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
 
        offset = sizeof(struct rxrpc_wire_header);
        if (skb_copy_bits(skb, offset, &ack, sizeof(ack)) < 0)
-               return rxrpc_proto_abort("XAK", call, 0);
+               return rxrpc_proto_abort(call, 0, rxrpc_badmsg_short_ack);
        offset += sizeof(ack);
 
        ack_serial = sp->hdr.serial;
@@ -845,7 +864,7 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
        ioffset = offset + nr_acks + 3;
        if (skb->len >= ioffset + sizeof(info) &&
            skb_copy_bits(skb, ioffset, &info, sizeof(info)) < 0)
-               return rxrpc_proto_abort("XAI", call, 0);
+               return rxrpc_proto_abort(call, 0, rxrpc_badmsg_short_ack_info);
 
        if (nr_acks > 0)
                skb_condense(skb);
@@ -868,10 +887,10 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
                rxrpc_input_ackinfo(call, skb, &info);
 
        if (first_soft_ack == 0)
-               return rxrpc_proto_abort("AK0", call, 0);
+               return rxrpc_proto_abort(call, 0, rxrpc_eproto_ackr_zero);
 
        /* Ignore ACKs unless we are or have just been transmitting. */
-       switch (READ_ONCE(call->state)) {
+       switch (__rxrpc_call_state(call)) {
        case RXRPC_CALL_CLIENT_SEND_REQUEST:
        case RXRPC_CALL_CLIENT_AWAIT_REPLY:
        case RXRPC_CALL_SERVER_SEND_REPLY:
@@ -883,20 +902,20 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
 
        if (before(hard_ack, call->acks_hard_ack) ||
            after(hard_ack, call->tx_top))
-               return rxrpc_proto_abort("AKW", call, 0);
+               return rxrpc_proto_abort(call, 0, rxrpc_eproto_ackr_outside_window);
        if (nr_acks > call->tx_top - hard_ack)
-               return rxrpc_proto_abort("AKN", call, 0);
+               return rxrpc_proto_abort(call, 0, rxrpc_eproto_ackr_sack_overflow);
 
        if (after(hard_ack, call->acks_hard_ack)) {
                if (rxrpc_rotate_tx_window(call, hard_ack, &summary)) {
-                       rxrpc_end_tx_phase(call, false, "ETA");
+                       rxrpc_end_tx_phase(call, false, rxrpc_eproto_unexpected_ack);
                        return;
                }
        }
 
        if (nr_acks > 0) {
                if (offset > (int)skb->len - nr_acks)
-                       return rxrpc_proto_abort("XSA", call, 0);
+                       return rxrpc_proto_abort(call, 0, rxrpc_eproto_ackr_short_sack);
                rxrpc_input_soft_acks(call, skb->data + offset, first_soft_ack,
                                      nr_acks, &summary);
        }
@@ -918,7 +937,7 @@ static void rxrpc_input_ackall(struct rxrpc_call *call, struct sk_buff *skb)
        struct rxrpc_ack_summary summary = { 0 };
 
        if (rxrpc_rotate_tx_window(call, call->tx_top, &summary))
-               rxrpc_end_tx_phase(call, false, "ETL");
+               rxrpc_end_tx_phase(call, false, rxrpc_eproto_unexpected_ackall);
 }
 
 /*
@@ -963,27 +982,23 @@ void rxrpc_input_call_packet(struct rxrpc_call *call, struct sk_buff *skb)
 
        switch (sp->hdr.type) {
        case RXRPC_PACKET_TYPE_DATA:
-               rxrpc_input_data(call, skb);
-               break;
+               return rxrpc_input_data(call, skb);
 
        case RXRPC_PACKET_TYPE_ACK:
-               rxrpc_input_ack(call, skb);
-               break;
+               return rxrpc_input_ack(call, skb);
 
        case RXRPC_PACKET_TYPE_BUSY:
                /* Just ignore BUSY packets from the server; the retry and
                 * lifespan timers will take care of business.  BUSY packets
                 * from the client don't make sense.
                 */
-               break;
+               return;
 
        case RXRPC_PACKET_TYPE_ABORT:
-               rxrpc_input_abort(call, skb);
-               break;
+               return rxrpc_input_abort(call, skb);
 
        case RXRPC_PACKET_TYPE_ACKALL:
-               rxrpc_input_ackall(call, skb);
-               break;
+               return rxrpc_input_ackall(call, skb);
 
        default:
                break;
@@ -998,24 +1013,18 @@ void rxrpc_input_call_packet(struct rxrpc_call *call, struct sk_buff *skb)
  */
 void rxrpc_implicit_end_call(struct rxrpc_call *call, struct sk_buff *skb)
 {
-       struct rxrpc_connection *conn = call->conn;
-
-       switch (READ_ONCE(call->state)) {
+       switch (__rxrpc_call_state(call)) {
        case RXRPC_CALL_SERVER_AWAIT_ACK:
                rxrpc_call_completed(call);
                fallthrough;
        case RXRPC_CALL_COMPLETE:
                break;
        default:
-               if (rxrpc_abort_call("IMP", call, 0, RX_CALL_DEAD, -ESHUTDOWN))
-                       rxrpc_send_abort_packet(call);
+               rxrpc_abort_call(call, 0, RX_CALL_DEAD, -ESHUTDOWN,
+                                rxrpc_eproto_improper_term);
                trace_rxrpc_improper_term(call);
                break;
        }
 
        rxrpc_input_call_event(call, skb);
-
-       spin_lock(&conn->bundle->channel_lock);
-       __rxrpc_disconnect_call(conn, call);
-       spin_unlock(&conn->bundle->channel_lock);
 }
index 0eb8471bfc53672f2ca4eec436c360b9c88d3bfd..34353b6e584bc51287b606483766ae9bddb2e475 100644 (file)
@@ -43,25 +43,17 @@ static void none_free_call_crypto(struct rxrpc_call *call)
 }
 
 static int none_respond_to_challenge(struct rxrpc_connection *conn,
-                                    struct sk_buff *skb,
-                                    u32 *_abort_code)
+                                    struct sk_buff *skb)
 {
-       struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
-
-       trace_rxrpc_rx_eproto(NULL, sp->hdr.serial,
-                             tracepoint_string("chall_none"));
-       return -EPROTO;
+       return rxrpc_abort_conn(conn, skb, RX_PROTOCOL_ERROR, -EPROTO,
+                               rxrpc_eproto_rxnull_challenge);
 }
 
 static int none_verify_response(struct rxrpc_connection *conn,
-                               struct sk_buff *skb,
-                               u32 *_abort_code)
+                               struct sk_buff *skb)
 {
-       struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
-
-       trace_rxrpc_rx_eproto(NULL, sp->hdr.serial,
-                             tracepoint_string("resp_none"));
-       return -EPROTO;
+       return rxrpc_abort_conn(conn, skb, RX_PROTOCOL_ERROR, -EPROTO,
+                               rxrpc_eproto_rxnull_response);
 }
 
 static void none_clear(struct rxrpc_connection *conn)
index 1ad067d66fb6040f1f1a14028b918d23b0682cba..9e9dfb2fc559be6c2615ecaf2e675f9e3c73d752 100644 (file)
@@ -66,10 +66,32 @@ void rxrpc_error_report(struct sock *sk)
        rcu_read_unlock();
 }
 
+/*
+ * Directly produce an abort from a packet.
+ */
+bool rxrpc_direct_abort(struct sk_buff *skb, enum rxrpc_abort_reason why,
+                       s32 abort_code, int err)
+{
+       struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+
+       trace_rxrpc_abort(0, why, sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
+                         abort_code, err);
+       skb->mark = RXRPC_SKB_MARK_REJECT_ABORT;
+       skb->priority = abort_code;
+       return false;
+}
+
+static bool rxrpc_bad_message(struct sk_buff *skb, enum rxrpc_abort_reason why)
+{
+       return rxrpc_direct_abort(skb, why, RX_PROTOCOL_ERROR, -EBADMSG);
+}
+
+#define just_discard true
+
 /*
  * Process event packets targeted at a local endpoint.
  */
-static void rxrpc_input_version(struct rxrpc_local *local, struct sk_buff *skb)
+static bool rxrpc_input_version(struct rxrpc_local *local, struct sk_buff *skb)
 {
        struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
        char v;
@@ -81,22 +103,21 @@ static void rxrpc_input_version(struct rxrpc_local *local, struct sk_buff *skb)
                if (v == 0)
                        rxrpc_send_version_request(local, &sp->hdr, skb);
        }
+
+       return true;
 }
 
 /*
  * Extract the wire header from a packet and translate the byte order.
  */
-static noinline
-int rxrpc_extract_header(struct rxrpc_skb_priv *sp, struct sk_buff *skb)
+static bool rxrpc_extract_header(struct rxrpc_skb_priv *sp,
+                                struct sk_buff *skb)
 {
        struct rxrpc_wire_header whdr;
 
        /* dig out the RxRPC connection details */
-       if (skb_copy_bits(skb, 0, &whdr, sizeof(whdr)) < 0) {
-               trace_rxrpc_rx_eproto(NULL, sp->hdr.serial,
-                                     tracepoint_string("bad_hdr"));
-               return -EBADMSG;
-       }
+       if (skb_copy_bits(skb, 0, &whdr, sizeof(whdr)) < 0)
+               return rxrpc_bad_message(skb, rxrpc_badmsg_short_hdr);
 
        memset(sp, 0, sizeof(*sp));
        sp->hdr.epoch           = ntohl(whdr.epoch);
@@ -110,7 +131,7 @@ int rxrpc_extract_header(struct rxrpc_skb_priv *sp, struct sk_buff *skb)
        sp->hdr.securityIndex   = whdr.securityIndex;
        sp->hdr._rsvd           = ntohs(whdr._rsvd);
        sp->hdr.serviceId       = ntohs(whdr.serviceId);
-       return 0;
+       return true;
 }
 
 /*
@@ -130,28 +151,28 @@ static bool rxrpc_extract_abort(struct sk_buff *skb)
 /*
  * Process packets received on the local endpoint
  */
-static int rxrpc_input_packet(struct rxrpc_local *local, struct sk_buff **_skb)
+static bool rxrpc_input_packet(struct rxrpc_local *local, struct sk_buff **_skb)
 {
        struct rxrpc_connection *conn;
        struct sockaddr_rxrpc peer_srx;
        struct rxrpc_skb_priv *sp;
        struct rxrpc_peer *peer = NULL;
        struct sk_buff *skb = *_skb;
-       int ret = 0;
+       bool ret = false;
 
        skb_pull(skb, sizeof(struct udphdr));
 
        sp = rxrpc_skb(skb);
 
        /* dig out the RxRPC connection details */
-       if (rxrpc_extract_header(sp, skb) < 0)
-               goto bad_message;
+       if (!rxrpc_extract_header(sp, skb))
+               return just_discard;
 
        if (IS_ENABLED(CONFIG_AF_RXRPC_INJECT_LOSS)) {
                static int lose;
                if ((lose++ & 7) == 7) {
                        trace_rxrpc_rx_lose(sp);
-                       return 0;
+                       return just_discard;
                }
        }
 
@@ -160,28 +181,28 @@ static int rxrpc_input_packet(struct rxrpc_local *local, struct sk_buff **_skb)
        switch (sp->hdr.type) {
        case RXRPC_PACKET_TYPE_VERSION:
                if (rxrpc_to_client(sp))
-                       return 0;
-               rxrpc_input_version(local, skb);
-               return 0;
+                       return just_discard;
+               return rxrpc_input_version(local, skb);
 
        case RXRPC_PACKET_TYPE_BUSY:
                if (rxrpc_to_server(sp))
-                       return 0;
+                       return just_discard;
                fallthrough;
        case RXRPC_PACKET_TYPE_ACK:
        case RXRPC_PACKET_TYPE_ACKALL:
                if (sp->hdr.callNumber == 0)
-                       goto bad_message;
+                       return rxrpc_bad_message(skb, rxrpc_badmsg_zero_call);
                break;
        case RXRPC_PACKET_TYPE_ABORT:
                if (!rxrpc_extract_abort(skb))
-                       return 0; /* Just discard if malformed */
+                       return just_discard; /* Just discard if malformed */
                break;
 
        case RXRPC_PACKET_TYPE_DATA:
-               if (sp->hdr.callNumber == 0 ||
-                   sp->hdr.seq == 0)
-                       goto bad_message;
+               if (sp->hdr.callNumber == 0)
+                       return rxrpc_bad_message(skb, rxrpc_badmsg_zero_call);
+               if (sp->hdr.seq == 0)
+                       return rxrpc_bad_message(skb, rxrpc_badmsg_zero_seq);
 
                /* Unshare the packet so that it can be modified for in-place
                 * decryption.
@@ -191,7 +212,7 @@ static int rxrpc_input_packet(struct rxrpc_local *local, struct sk_buff **_skb)
                        if (!skb) {
                                rxrpc_eaten_skb(*_skb, rxrpc_skb_eaten_by_unshare_nomem);
                                *_skb = NULL;
-                               return 0;
+                               return just_discard;
                        }
 
                        if (skb != *_skb) {
@@ -205,28 +226,28 @@ static int rxrpc_input_packet(struct rxrpc_local *local, struct sk_buff **_skb)
 
        case RXRPC_PACKET_TYPE_CHALLENGE:
                if (rxrpc_to_server(sp))
-                       return 0;
+                       return just_discard;
                break;
        case RXRPC_PACKET_TYPE_RESPONSE:
                if (rxrpc_to_client(sp))
-                       return 0;
+                       return just_discard;
                break;
 
                /* Packet types 9-11 should just be ignored. */
        case RXRPC_PACKET_TYPE_PARAMS:
        case RXRPC_PACKET_TYPE_10:
        case RXRPC_PACKET_TYPE_11:
-               return 0;
+               return just_discard;
 
        default:
-               goto bad_message;
+               return rxrpc_bad_message(skb, rxrpc_badmsg_unsupported_packet);
        }
 
        if (sp->hdr.serviceId == 0)
-               goto bad_message;
+               return rxrpc_bad_message(skb, rxrpc_badmsg_zero_service);
 
        if (WARN_ON_ONCE(rxrpc_extract_addr_from_skb(&peer_srx, skb) < 0))
-               return true; /* Unsupported address type - discard. */
+               return just_discard; /* Unsupported address type. */
 
        if (peer_srx.transport.family != local->srx.transport.family &&
            (peer_srx.transport.family == AF_INET &&
@@ -234,7 +255,7 @@ static int rxrpc_input_packet(struct rxrpc_local *local, struct sk_buff **_skb)
                pr_warn_ratelimited("AF_RXRPC: Protocol mismatch %u not %u\n",
                                    peer_srx.transport.family,
                                    local->srx.transport.family);
-               return true; /* Wrong address type - discard. */
+               return just_discard; /* Wrong address type. */
        }
 
        if (rxrpc_to_client(sp)) {
@@ -242,12 +263,8 @@ static int rxrpc_input_packet(struct rxrpc_local *local, struct sk_buff **_skb)
                conn = rxrpc_find_client_connection_rcu(local, &peer_srx, skb);
                conn = rxrpc_get_connection_maybe(conn, rxrpc_conn_get_call_input);
                rcu_read_unlock();
-               if (!conn) {
-                       trace_rxrpc_abort(0, "NCC", sp->hdr.cid,
-                                         sp->hdr.callNumber, sp->hdr.seq,
-                                         RXKADINCONSISTENCY, EBADMSG);
-                       goto protocol_error;
-               }
+               if (!conn)
+                       return rxrpc_protocol_error(skb, rxrpc_eproto_no_client_conn);
 
                ret = rxrpc_input_packet_on_conn(conn, &peer_srx, skb);
                rxrpc_put_connection(conn, rxrpc_conn_put_call_input);
@@ -280,19 +297,7 @@ static int rxrpc_input_packet(struct rxrpc_local *local, struct sk_buff **_skb)
 
        ret = rxrpc_new_incoming_call(local, peer, NULL, &peer_srx, skb);
        rxrpc_put_peer(peer, rxrpc_peer_put_input);
-       if (ret < 0)
-               goto reject_packet;
-       return 0;
-
-bad_message:
-       trace_rxrpc_abort(0, "BAD", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
-                         RX_PROTOCOL_ERROR, EBADMSG);
-protocol_error:
-       skb->priority = RX_PROTOCOL_ERROR;
-       skb->mark = RXRPC_SKB_MARK_REJECT_ABORT;
-reject_packet:
-       rxrpc_reject_packet(local, skb);
-       return 0;
+       return ret;
 }
 
 /*
@@ -306,21 +311,23 @@ static int rxrpc_input_packet_on_conn(struct rxrpc_connection *conn,
        struct rxrpc_channel *chan;
        struct rxrpc_call *call = NULL;
        unsigned int channel;
+       bool ret;
 
        if (sp->hdr.securityIndex != conn->security_ix)
-               goto wrong_security;
+               return rxrpc_direct_abort(skb, rxrpc_eproto_wrong_security,
+                                         RXKADINCONSISTENCY, -EBADMSG);
 
        if (sp->hdr.serviceId != conn->service_id) {
                int old_id;
 
                if (!test_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags))
-                       goto reupgrade;
+                       return rxrpc_protocol_error(skb, rxrpc_eproto_reupgrade);
+
                old_id = cmpxchg(&conn->service_id, conn->orig_service_id,
                                 sp->hdr.serviceId);
-
                if (old_id != conn->orig_service_id &&
                    old_id != sp->hdr.serviceId)
-                       goto reupgrade;
+                       return rxrpc_protocol_error(skb, rxrpc_eproto_bad_upgrade);
        }
 
        if (after(sp->hdr.serial, conn->hi_serial))
@@ -336,19 +343,19 @@ static int rxrpc_input_packet_on_conn(struct rxrpc_connection *conn,
 
        /* Ignore really old calls */
        if (sp->hdr.callNumber < chan->last_call)
-               return 0;
+               return just_discard;
 
        if (sp->hdr.callNumber == chan->last_call) {
                if (chan->call ||
                    sp->hdr.type == RXRPC_PACKET_TYPE_ABORT)
-                       return 0;
+                       return just_discard;
 
                /* For the previous service call, if completed successfully, we
                 * discard all further packets.
                 */
                if (rxrpc_conn_is_service(conn) &&
                    chan->last_type == RXRPC_PACKET_TYPE_ACK)
-                       return 0;
+                       return just_discard;
 
                /* But otherwise we need to retransmit the final packet from
                 * data cached in the connection record.
@@ -358,19 +365,17 @@ static int rxrpc_input_packet_on_conn(struct rxrpc_connection *conn,
                                            sp->hdr.seq,
                                            sp->hdr.serial,
                                            sp->hdr.flags);
-               rxrpc_input_conn_packet(conn, skb);
-               return 0;
+               rxrpc_conn_retransmit_call(conn, skb, channel);
+               return just_discard;
        }
 
-       rcu_read_lock();
-       call = rxrpc_try_get_call(rcu_dereference(chan->call),
-                                 rxrpc_call_get_input);
-       rcu_read_unlock();
+       call = rxrpc_try_get_call(chan->call, rxrpc_call_get_input);
 
        if (sp->hdr.callNumber > chan->call_id) {
                if (rxrpc_to_client(sp)) {
                        rxrpc_put_call(call, rxrpc_call_put_input);
-                       goto reject_packet;
+                       return rxrpc_protocol_error(skb,
+                                                   rxrpc_eproto_unexpected_implicit_end);
                }
 
                if (call) {
@@ -382,38 +387,14 @@ static int rxrpc_input_packet_on_conn(struct rxrpc_connection *conn,
 
        if (!call) {
                if (rxrpc_to_client(sp))
-                       goto bad_message;
-               if (rxrpc_new_incoming_call(conn->local, conn->peer, conn,
-                                           peer_srx, skb) == 0)
-                       return 0;
-               goto reject_packet;
+                       return rxrpc_protocol_error(skb, rxrpc_eproto_no_client_call);
+               return rxrpc_new_incoming_call(conn->local, conn->peer, conn,
+                                              peer_srx, skb);
        }
 
-       rxrpc_input_call_event(call, skb);
+       ret = rxrpc_input_call_event(call, skb);
        rxrpc_put_call(call, rxrpc_call_put_input);
-       return 0;
-
-wrong_security:
-       trace_rxrpc_abort(0, "SEC", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
-                         RXKADINCONSISTENCY, EBADMSG);
-       skb->priority = RXKADINCONSISTENCY;
-       goto post_abort;
-
-reupgrade:
-       trace_rxrpc_abort(0, "UPG", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
-                         RX_PROTOCOL_ERROR, EBADMSG);
-       goto protocol_error;
-
-bad_message:
-       trace_rxrpc_abort(0, "BAD", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
-                         RX_PROTOCOL_ERROR, EBADMSG);
-protocol_error:
-       skb->priority = RX_PROTOCOL_ERROR;
-post_abort:
-       skb->mark = RXRPC_SKB_MARK_REJECT_ABORT;
-reject_packet:
-       rxrpc_reject_packet(conn->local, skb);
-       return 0;
+       return ret;
 }
 
 /*
@@ -421,6 +402,7 @@ reject_packet:
  */
 int rxrpc_io_thread(void *data)
 {
+       struct rxrpc_connection *conn;
        struct sk_buff_head rx_queue;
        struct rxrpc_local *local = data;
        struct rxrpc_call *call;
@@ -436,6 +418,24 @@ int rxrpc_io_thread(void *data)
        for (;;) {
                rxrpc_inc_stat(local->rxnet, stat_io_loop);
 
+               /* Deal with connections that want immediate attention. */
+               conn = list_first_entry_or_null(&local->conn_attend_q,
+                                               struct rxrpc_connection,
+                                               attend_link);
+               if (conn) {
+                       spin_lock_bh(&local->lock);
+                       list_del_init(&conn->attend_link);
+                       spin_unlock_bh(&local->lock);
+
+                       rxrpc_input_conn_event(conn, NULL);
+                       rxrpc_put_connection(conn, rxrpc_conn_put_poke);
+                       continue;
+               }
+
+               if (test_and_clear_bit(RXRPC_CLIENT_CONN_REAP_TIMER,
+                                      &local->client_conn_flags))
+                       rxrpc_discard_expired_client_conns(local);
+
                /* Deal with calls that want immediate attention. */
                if ((call = list_first_entry_or_null(&local->call_attend_q,
                                                     struct rxrpc_call,
@@ -450,12 +450,17 @@ int rxrpc_io_thread(void *data)
                        continue;
                }
 
+               if (!list_empty(&local->new_client_calls))
+                       rxrpc_connect_client_calls(local);
+
                /* Process received packets and errors. */
                if ((skb = __skb_dequeue(&rx_queue))) {
+                       struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
                        switch (skb->mark) {
                        case RXRPC_SKB_MARK_PACKET:
                                skb->priority = 0;
-                               rxrpc_input_packet(local, &skb);
+                               if (!rxrpc_input_packet(local, &skb))
+                                       rxrpc_reject_packet(local, skb);
                                trace_rxrpc_rx_done(skb->mark, skb->priority);
                                rxrpc_free_skb(skb, rxrpc_skb_put_input);
                                break;
@@ -463,6 +468,11 @@ int rxrpc_io_thread(void *data)
                                rxrpc_input_error(local, skb);
                                rxrpc_free_skb(skb, rxrpc_skb_put_error_report);
                                break;
+                       case RXRPC_SKB_MARK_SERVICE_CONN_SECURED:
+                               rxrpc_input_conn_event(sp->conn, skb);
+                               rxrpc_put_connection(sp->conn, rxrpc_conn_put_poke);
+                               rxrpc_free_skb(skb, rxrpc_skb_put_conn_secured);
+                               break;
                        default:
                                WARN_ON_ONCE(1);
                                rxrpc_free_skb(skb, rxrpc_skb_put_unknown);
@@ -481,7 +491,11 @@ int rxrpc_io_thread(void *data)
                set_current_state(TASK_INTERRUPTIBLE);
                should_stop = kthread_should_stop();
                if (!skb_queue_empty(&local->rx_queue) ||
-                   !list_empty(&local->call_attend_q)) {
+                   !list_empty(&local->call_attend_q) ||
+                   !list_empty(&local->conn_attend_q) ||
+                   !list_empty(&local->new_client_calls) ||
+                   test_bit(RXRPC_CLIENT_CONN_REAP_TIMER,
+                            &local->client_conn_flags)) {
                        __set_current_state(TASK_RUNNING);
                        continue;
                }
index 270b63d8f37a646e3feb568fe6e637a18ceb37f7..b8eaca5d9f221362517b26aeeea91739c18bba24 100644 (file)
@@ -82,31 +82,59 @@ static long rxrpc_local_cmp_key(const struct rxrpc_local *local,
        }
 }
 
+static void rxrpc_client_conn_reap_timeout(struct timer_list *timer)
+{
+       struct rxrpc_local *local =
+               container_of(timer, struct rxrpc_local, client_conn_reap_timer);
+
+       if (local->kill_all_client_conns &&
+           test_and_set_bit(RXRPC_CLIENT_CONN_REAP_TIMER, &local->client_conn_flags))
+               rxrpc_wake_up_io_thread(local);
+}
+
 /*
  * Allocate a new local endpoint.
  */
-static struct rxrpc_local *rxrpc_alloc_local(struct rxrpc_net *rxnet,
+static struct rxrpc_local *rxrpc_alloc_local(struct net *net,
                                             const struct sockaddr_rxrpc *srx)
 {
        struct rxrpc_local *local;
+       u32 tmp;
 
        local = kzalloc(sizeof(struct rxrpc_local), GFP_KERNEL);
        if (local) {
                refcount_set(&local->ref, 1);
                atomic_set(&local->active_users, 1);
-               local->rxnet = rxnet;
+               local->net = net;
+               local->rxnet = rxrpc_net(net);
                INIT_HLIST_NODE(&local->link);
                init_rwsem(&local->defrag_sem);
                init_completion(&local->io_thread_ready);
                skb_queue_head_init(&local->rx_queue);
+               INIT_LIST_HEAD(&local->conn_attend_q);
                INIT_LIST_HEAD(&local->call_attend_q);
+
                local->client_bundles = RB_ROOT;
                spin_lock_init(&local->client_bundles_lock);
+               local->kill_all_client_conns = false;
+               INIT_LIST_HEAD(&local->idle_client_conns);
+               timer_setup(&local->client_conn_reap_timer,
+                           rxrpc_client_conn_reap_timeout, 0);
+
                spin_lock_init(&local->lock);
                rwlock_init(&local->services_lock);
                local->debug_id = atomic_inc_return(&rxrpc_debug_id);
                memcpy(&local->srx, srx, sizeof(*srx));
                local->srx.srx_service = 0;
+               idr_init(&local->conn_ids);
+               get_random_bytes(&tmp, sizeof(tmp));
+               tmp &= 0x3fffffff;
+               if (tmp == 0)
+                       tmp = 1;
+               idr_set_cursor(&local->conn_ids, tmp);
+               INIT_LIST_HEAD(&local->new_client_calls);
+               spin_lock_init(&local->client_call_lock);
+
                trace_rxrpc_local(local->debug_id, rxrpc_local_new, 1, 1);
        }
 
@@ -248,7 +276,7 @@ struct rxrpc_local *rxrpc_lookup_local(struct net *net,
                goto found;
        }
 
-       local = rxrpc_alloc_local(rxnet, srx);
+       local = rxrpc_alloc_local(net, srx);
        if (!local)
                goto nomem;
 
@@ -407,6 +435,7 @@ void rxrpc_destroy_local(struct rxrpc_local *local)
         * local endpoint.
         */
        rxrpc_purge_queue(&local->rx_queue);
+       rxrpc_purge_client_connections(local);
 }
 
 /*
index 5905530e2f33b439173f90b6b6c69beb1464e5f2..a0319c040c25dea6a34441292409c9a39b61283e 100644 (file)
 
 unsigned int rxrpc_net_id;
 
-static void rxrpc_client_conn_reap_timeout(struct timer_list *timer)
-{
-       struct rxrpc_net *rxnet =
-               container_of(timer, struct rxrpc_net, client_conn_reap_timer);
-
-       if (rxnet->live)
-               rxrpc_queue_work(&rxnet->client_conn_reaper);
-}
-
 static void rxrpc_service_conn_reap_timeout(struct timer_list *timer)
 {
        struct rxrpc_net *rxnet =
@@ -63,14 +54,6 @@ static __net_init int rxrpc_init_net(struct net *net)
                    rxrpc_service_conn_reap_timeout, 0);
 
        atomic_set(&rxnet->nr_client_conns, 0);
-       rxnet->kill_all_client_conns = false;
-       spin_lock_init(&rxnet->client_conn_cache_lock);
-       mutex_init(&rxnet->client_conn_discard_lock);
-       INIT_LIST_HEAD(&rxnet->idle_client_conns);
-       INIT_WORK(&rxnet->client_conn_reaper,
-                 rxrpc_discard_expired_client_conns);
-       timer_setup(&rxnet->client_conn_reap_timer,
-                   rxrpc_client_conn_reap_timeout, 0);
 
        INIT_HLIST_HEAD(&rxnet->local_endpoints);
        mutex_init(&rxnet->local_mutex);
index 3d8c9f830ee0942989d3ec0b39598268d4cd77dc..a9746be296347a192cfeb0698a895886d247f743 100644 (file)
@@ -261,7 +261,7 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, struct rxrpc_txbuf *txb)
                                      rxrpc_tx_point_call_ack);
        rxrpc_tx_backoff(call, ret);
 
-       if (call->state < RXRPC_CALL_COMPLETE) {
+       if (!__rxrpc_call_is_complete(call)) {
                if (ret < 0)
                        rxrpc_cancel_rtt_probe(call, serial, rtt_slot);
                rxrpc_set_keepalive(call);
@@ -544,6 +544,62 @@ send_fragmentable:
        goto done;
 }
 
+/*
+ * Transmit a connection-level abort.
+ */
+void rxrpc_send_conn_abort(struct rxrpc_connection *conn)
+{
+       struct rxrpc_wire_header whdr;
+       struct msghdr msg;
+       struct kvec iov[2];
+       __be32 word;
+       size_t len;
+       u32 serial;
+       int ret;
+
+       msg.msg_name    = &conn->peer->srx.transport;
+       msg.msg_namelen = conn->peer->srx.transport_len;
+       msg.msg_control = NULL;
+       msg.msg_controllen = 0;
+       msg.msg_flags   = 0;
+
+       whdr.epoch      = htonl(conn->proto.epoch);
+       whdr.cid        = htonl(conn->proto.cid);
+       whdr.callNumber = 0;
+       whdr.seq        = 0;
+       whdr.type       = RXRPC_PACKET_TYPE_ABORT;
+       whdr.flags      = conn->out_clientflag;
+       whdr.userStatus = 0;
+       whdr.securityIndex = conn->security_ix;
+       whdr._rsvd      = 0;
+       whdr.serviceId  = htons(conn->service_id);
+
+       word            = htonl(conn->abort_code);
+
+       iov[0].iov_base = &whdr;
+       iov[0].iov_len  = sizeof(whdr);
+       iov[1].iov_base = &word;
+       iov[1].iov_len  = sizeof(word);
+
+       len = iov[0].iov_len + iov[1].iov_len;
+
+       serial = atomic_inc_return(&conn->serial);
+       whdr.serial = htonl(serial);
+
+       iov_iter_kvec(&msg.msg_iter, WRITE, iov, 2, len);
+       ret = do_udp_sendmsg(conn->local->socket, &msg, len);
+       if (ret < 0) {
+               trace_rxrpc_tx_fail(conn->debug_id, serial, ret,
+                                   rxrpc_tx_point_conn_abort);
+               _debug("sendmsg failed: %d", ret);
+               return;
+       }
+
+       trace_rxrpc_tx_packet(conn->debug_id, &whdr, rxrpc_tx_point_conn_abort);
+
+       conn->peer->last_tx_at = ktime_get_seconds();
+}
+
 /*
  * Reject a packet through the local endpoint.
  */
@@ -667,7 +723,7 @@ void rxrpc_send_keepalive(struct rxrpc_peer *peer)
 static inline void rxrpc_instant_resend(struct rxrpc_call *call,
                                        struct rxrpc_txbuf *txb)
 {
-       if (call->state < RXRPC_CALL_COMPLETE)
+       if (!__rxrpc_call_is_complete(call))
                kdebug("resend");
 }
 
index 4eecea2be307b60baedf511f32121c49edd62ae8..8d7a715a0bb1ca4f4761d7cef833033d7cf30f70 100644 (file)
@@ -147,10 +147,10 @@ struct rxrpc_peer *rxrpc_lookup_peer_rcu(struct rxrpc_local *local,
  * assess the MTU size for the network interface through which this peer is
  * reached
  */
-static void rxrpc_assess_MTU_size(struct rxrpc_sock *rx,
+static void rxrpc_assess_MTU_size(struct rxrpc_local *local,
                                  struct rxrpc_peer *peer)
 {
-       struct net *net = sock_net(&rx->sk);
+       struct net *net = local->net;
        struct dst_entry *dst;
        struct rtable *rt;
        struct flowi fl;
@@ -236,11 +236,11 @@ struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *local, gfp_t gfp,
 /*
  * Initialise peer record.
  */
-static void rxrpc_init_peer(struct rxrpc_sock *rx, struct rxrpc_peer *peer,
+static void rxrpc_init_peer(struct rxrpc_local *local, struct rxrpc_peer *peer,
                            unsigned long hash_key)
 {
        peer->hash_key = hash_key;
-       rxrpc_assess_MTU_size(rx, peer);
+       rxrpc_assess_MTU_size(local, peer);
        peer->mtu = peer->if_mtu;
        peer->rtt_last_req = ktime_get_real();
 
@@ -272,8 +272,7 @@ static void rxrpc_init_peer(struct rxrpc_sock *rx, struct rxrpc_peer *peer,
 /*
  * Set up a new peer.
  */
-static struct rxrpc_peer *rxrpc_create_peer(struct rxrpc_sock *rx,
-                                           struct rxrpc_local *local,
+static struct rxrpc_peer *rxrpc_create_peer(struct rxrpc_local *local,
                                            struct sockaddr_rxrpc *srx,
                                            unsigned long hash_key,
                                            gfp_t gfp)
@@ -285,7 +284,7 @@ static struct rxrpc_peer *rxrpc_create_peer(struct rxrpc_sock *rx,
        peer = rxrpc_alloc_peer(local, gfp, rxrpc_peer_new_client);
        if (peer) {
                memcpy(&peer->srx, srx, sizeof(*srx));
-               rxrpc_init_peer(rx, peer, hash_key);
+               rxrpc_init_peer(local, peer, hash_key);
        }
 
        _leave(" = %p", peer);
@@ -304,14 +303,13 @@ static void rxrpc_free_peer(struct rxrpc_peer *peer)
  * since we've already done a search in the list from the non-reentrant context
  * (the data_ready handler) that is the only place we can add new peers.
  */
-void rxrpc_new_incoming_peer(struct rxrpc_sock *rx, struct rxrpc_local *local,
-                            struct rxrpc_peer *peer)
+void rxrpc_new_incoming_peer(struct rxrpc_local *local, struct rxrpc_peer *peer)
 {
        struct rxrpc_net *rxnet = local->rxnet;
        unsigned long hash_key;
 
        hash_key = rxrpc_peer_hash_key(local, &peer->srx);
-       rxrpc_init_peer(rx, peer, hash_key);
+       rxrpc_init_peer(local, peer, hash_key);
 
        spin_lock(&rxnet->peer_hash_lock);
        hash_add_rcu(rxnet->peer_hash, &peer->hash_link, hash_key);
@@ -322,8 +320,7 @@ void rxrpc_new_incoming_peer(struct rxrpc_sock *rx, struct rxrpc_local *local,
 /*
  * obtain a remote transport endpoint for the specified address
  */
-struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_sock *rx,
-                                    struct rxrpc_local *local,
+struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *local,
                                     struct sockaddr_rxrpc *srx, gfp_t gfp)
 {
        struct rxrpc_peer *peer, *candidate;
@@ -343,7 +340,7 @@ struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_sock *rx,
                /* The peer is not yet present in hash - create a candidate
                 * for a new record and then redo the search.
                 */
-               candidate = rxrpc_create_peer(rx, local, srx, hash_key, gfp);
+               candidate = rxrpc_create_peer(local, srx, hash_key, gfp);
                if (!candidate) {
                        _leave(" = NULL [nomem]");
                        return NULL;
index 3a59591ec061524f1580a0c57121129c175675cf..750158a085cdd1a6f8fe3bbe9cfec01f32ca3f3e 100644 (file)
 
 static const char *const rxrpc_conn_states[RXRPC_CONN__NR_STATES] = {
        [RXRPC_CONN_UNUSED]                     = "Unused  ",
+       [RXRPC_CONN_CLIENT_UNSECURED]           = "ClUnsec ",
        [RXRPC_CONN_CLIENT]                     = "Client  ",
        [RXRPC_CONN_SERVICE_PREALLOC]           = "SvPrealc",
        [RXRPC_CONN_SERVICE_UNSECURED]          = "SvUnsec ",
        [RXRPC_CONN_SERVICE_CHALLENGING]        = "SvChall ",
        [RXRPC_CONN_SERVICE]                    = "SvSecure",
-       [RXRPC_CONN_REMOTELY_ABORTED]           = "RmtAbort",
-       [RXRPC_CONN_LOCALLY_ABORTED]            = "LocAbort",
+       [RXRPC_CONN_ABORTED]                    = "Aborted ",
 };
 
 /*
@@ -51,6 +51,7 @@ static int rxrpc_call_seq_show(struct seq_file *seq, void *v)
        struct rxrpc_local *local;
        struct rxrpc_call *call;
        struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
+       enum rxrpc_call_state state;
        unsigned long timeout = 0;
        rxrpc_seq_t acks_hard_ack;
        char lbuff[50], rbuff[50];
@@ -75,7 +76,8 @@ static int rxrpc_call_seq_show(struct seq_file *seq, void *v)
 
        sprintf(rbuff, "%pISpc", &call->dest_srx.transport);
 
-       if (call->state != RXRPC_CALL_SERVER_PREALLOC) {
+       state = rxrpc_call_state(call);
+       if (state != RXRPC_CALL_SERVER_PREALLOC) {
                timeout = READ_ONCE(call->expect_rx_by);
                timeout -= jiffies;
        }
@@ -92,7 +94,7 @@ static int rxrpc_call_seq_show(struct seq_file *seq, void *v)
                   call->call_id,
                   rxrpc_is_service_call(call) ? "Svc" : "Clt",
                   refcount_read(&call->ref),
-                  rxrpc_call_states[call->state],
+                  rxrpc_call_states[state],
                   call->abort_code,
                   call->debug_id,
                   acks_hard_ack, READ_ONCE(call->tx_top) - acks_hard_ack,
@@ -143,6 +145,7 @@ static int rxrpc_connection_seq_show(struct seq_file *seq, void *v)
 {
        struct rxrpc_connection *conn;
        struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
+       const char *state;
        char lbuff[50], rbuff[50];
 
        if (v == &rxnet->conn_proc_list) {
@@ -163,9 +166,11 @@ static int rxrpc_connection_seq_show(struct seq_file *seq, void *v)
        }
 
        sprintf(lbuff, "%pISpc", &conn->local->srx.transport);
-
        sprintf(rbuff, "%pISpc", &conn->peer->srx.transport);
 print:
+       state = rxrpc_is_conn_aborted(conn) ?
+               rxrpc_call_completions[conn->completion] :
+               rxrpc_conn_states[conn->state];
        seq_printf(seq,
                   "UDP   %-47.47s %-47.47s %4x %08x %s %3u %3d"
                   " %s %08x %08x %08x %08x %08x %08x %08x\n",
@@ -176,7 +181,7 @@ print:
                   rxrpc_conn_is_service(conn) ? "Svc" : "Clt",
                   refcount_read(&conn->ref),
                   atomic_read(&conn->active),
-                  rxrpc_conn_states[conn->state],
+                  state,
                   key_serial(conn->key),
                   atomic_read(&conn->serial),
                   conn->hi_serial,
index 36b25d003cf00229fabd3820390f680c0fda8028..dd54ceee7bcc8e3fbf4d517b95f6e01b38a0d41e 100644 (file)
@@ -58,85 +58,6 @@ void rxrpc_notify_socket(struct rxrpc_call *call)
        _leave("");
 }
 
-/*
- * Transition a call to the complete state.
- */
-bool __rxrpc_set_call_completion(struct rxrpc_call *call,
-                                enum rxrpc_call_completion compl,
-                                u32 abort_code,
-                                int error)
-{
-       if (call->state < RXRPC_CALL_COMPLETE) {
-               call->abort_code = abort_code;
-               call->error = error;
-               call->completion = compl;
-               call->state = RXRPC_CALL_COMPLETE;
-               trace_rxrpc_call_complete(call);
-               wake_up(&call->waitq);
-               rxrpc_notify_socket(call);
-               return true;
-       }
-       return false;
-}
-
-bool rxrpc_set_call_completion(struct rxrpc_call *call,
-                              enum rxrpc_call_completion compl,
-                              u32 abort_code,
-                              int error)
-{
-       bool ret = false;
-
-       if (call->state < RXRPC_CALL_COMPLETE) {
-               write_lock(&call->state_lock);
-               ret = __rxrpc_set_call_completion(call, compl, abort_code, error);
-               write_unlock(&call->state_lock);
-       }
-       return ret;
-}
-
-/*
- * Record that a call successfully completed.
- */
-bool __rxrpc_call_completed(struct rxrpc_call *call)
-{
-       return __rxrpc_set_call_completion(call, RXRPC_CALL_SUCCEEDED, 0, 0);
-}
-
-bool rxrpc_call_completed(struct rxrpc_call *call)
-{
-       bool ret = false;
-
-       if (call->state < RXRPC_CALL_COMPLETE) {
-               write_lock(&call->state_lock);
-               ret = __rxrpc_call_completed(call);
-               write_unlock(&call->state_lock);
-       }
-       return ret;
-}
-
-/*
- * Record that a call is locally aborted.
- */
-bool __rxrpc_abort_call(const char *why, struct rxrpc_call *call,
-                       rxrpc_seq_t seq, u32 abort_code, int error)
-{
-       trace_rxrpc_abort(call->debug_id, why, call->cid, call->call_id, seq,
-                         abort_code, error);
-       return __rxrpc_set_call_completion(call, RXRPC_CALL_LOCALLY_ABORTED,
-                                          abort_code, error);
-}
-
-bool rxrpc_abort_call(const char *why, struct rxrpc_call *call,
-                     rxrpc_seq_t seq, u32 abort_code, int error)
-{
-       bool ret;
-
-       write_lock(&call->state_lock);
-       ret = __rxrpc_abort_call(why, call, seq, abort_code, error);
-       write_unlock(&call->state_lock);
-       return ret;
-}
-
 /*
  * Pass a call terminating message to userspace.
  */
@@ -168,7 +89,7 @@ static int rxrpc_recvmsg_term(struct rxrpc_call *call, struct msghdr *msg)
                ret = put_cmsg(msg, SOL_RXRPC, RXRPC_LOCAL_ERROR, 4, &tmp);
                break;
        default:
-               pr_err("Invalid terminal call state %u\n", call->state);
+               pr_err("Invalid terminal call state %u\n", call->completion);
                BUG();
                break;
        }
@@ -179,41 +100,6 @@ static int rxrpc_recvmsg_term(struct rxrpc_call *call, struct msghdr *msg)
        return ret;
 }
 
-/*
- * End the packet reception phase.
- */
-static void rxrpc_end_rx_phase(struct rxrpc_call *call, rxrpc_serial_t serial)
-{
-       rxrpc_seq_t whigh = READ_ONCE(call->rx_highest_seq);
-
-       _enter("%d,%s", call->debug_id, rxrpc_call_states[call->state]);
-
-       trace_rxrpc_receive(call, rxrpc_receive_end, 0, whigh);
-
-       if (call->state == RXRPC_CALL_CLIENT_RECV_REPLY)
-               rxrpc_propose_delay_ACK(call, serial, rxrpc_propose_ack_terminal_ack);
-
-       write_lock(&call->state_lock);
-
-       switch (call->state) {
-       case RXRPC_CALL_CLIENT_RECV_REPLY:
-               __rxrpc_call_completed(call);
-               write_unlock(&call->state_lock);
-               break;
-
-       case RXRPC_CALL_SERVER_RECV_REQUEST:
-               call->state = RXRPC_CALL_SERVER_ACK_REQUEST;
-               call->expect_req_by = jiffies + MAX_JIFFY_OFFSET;
-               write_unlock(&call->state_lock);
-               rxrpc_propose_delay_ACK(call, serial,
-                                       rxrpc_propose_ack_processing_op);
-               break;
-       default:
-               write_unlock(&call->state_lock);
-               break;
-       }
-}
-
 /*
  * Discard a packet we've used up and advance the Rx window by one.
  */
@@ -244,10 +130,9 @@ static void rxrpc_rotate_rx_window(struct rxrpc_call *call)
 
        trace_rxrpc_receive(call, last ? rxrpc_receive_rotate_last : rxrpc_receive_rotate,
                            serial, call->rx_consumed);
-       if (last) {
-               rxrpc_end_rx_phase(call, serial);
-               return;
-       }
+
+       if (last)
+               set_bit(RXRPC_CALL_RECVMSG_READ_ALL, &call->flags);
 
        /* Check to see if there's an ACK that needs sending. */
        acked = atomic_add_return(call->rx_consumed - old_consumed,
@@ -272,7 +157,8 @@ static int rxrpc_verify_data(struct rxrpc_call *call, struct sk_buff *skb)
 /*
  * Deliver messages to a call.  This keeps processing packets until the buffer
  * is filled and we find either more DATA (returns 0) or the end of the DATA
- * (returns 1).  If more packets are required, it returns -EAGAIN.
+ * (returns 1).  If more packets are required, it returns -EAGAIN and if the
+ * call has failed it returns -EIO.
  */
 static int rxrpc_recvmsg_data(struct socket *sock, struct rxrpc_call *call,
                              struct msghdr *msg, struct iov_iter *iter,
@@ -288,7 +174,13 @@ static int rxrpc_recvmsg_data(struct socket *sock, struct rxrpc_call *call,
        rx_pkt_offset = call->rx_pkt_offset;
        rx_pkt_len = call->rx_pkt_len;
 
-       if (call->state >= RXRPC_CALL_SERVER_ACK_REQUEST) {
+       if (rxrpc_call_has_failed(call)) {
+               seq = lower_32_bits(atomic64_read(&call->ackr_window)) - 1;
+               ret = -EIO;
+               goto done;
+       }
+
+       if (test_bit(RXRPC_CALL_RECVMSG_READ_ALL, &call->flags)) {
                seq = lower_32_bits(atomic64_read(&call->ackr_window)) - 1;
                ret = 1;
                goto done;
@@ -312,14 +204,15 @@ static int rxrpc_recvmsg_data(struct socket *sock, struct rxrpc_call *call,
 
                if (rx_pkt_offset == 0) {
                        ret2 = rxrpc_verify_data(call, skb);
-                       rx_pkt_offset = sp->offset;
-                       rx_pkt_len = sp->len;
                        trace_rxrpc_recvdata(call, rxrpc_recvmsg_next, seq,
-                                            rx_pkt_offset, rx_pkt_len, ret2);
+                                            sp->offset, sp->len, ret2);
                        if (ret2 < 0) {
+                               kdebug("verify = %d", ret2);
                                ret = ret2;
                                goto out;
                        }
+                       rx_pkt_offset = sp->offset;
+                       rx_pkt_len = sp->len;
                } else {
                        trace_rxrpc_recvdata(call, rxrpc_recvmsg_cont, seq,
                                             rx_pkt_offset, rx_pkt_len, 0);
@@ -388,13 +281,14 @@ int rxrpc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
        struct rxrpc_call *call;
        struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
        struct list_head *l;
+       unsigned int call_debug_id = 0;
        size_t copied = 0;
        long timeo;
        int ret;
 
        DEFINE_WAIT(wait);
 
-       trace_rxrpc_recvmsg(NULL, rxrpc_recvmsg_enter, 0);
+       trace_rxrpc_recvmsg(0, rxrpc_recvmsg_enter, 0);
 
        if (flags & (MSG_OOB | MSG_TRUNC))
                return -EOPNOTSUPP;
@@ -431,7 +325,7 @@ try_again:
                if (list_empty(&rx->recvmsg_q)) {
                        if (signal_pending(current))
                                goto wait_interrupted;
-                       trace_rxrpc_recvmsg(NULL, rxrpc_recvmsg_wait, 0);
+                       trace_rxrpc_recvmsg(0, rxrpc_recvmsg_wait, 0);
                        timeo = schedule_timeout(timeo);
                }
                finish_wait(sk_sleep(&rx->sk), &wait);
@@ -450,7 +344,8 @@ try_again:
                rxrpc_get_call(call, rxrpc_call_get_recvmsg);
        write_unlock(&rx->recvmsg_lock);
 
-       trace_rxrpc_recvmsg(call, rxrpc_recvmsg_dequeue, 0);
+       call_debug_id = call->debug_id;
+       trace_rxrpc_recvmsg(call_debug_id, rxrpc_recvmsg_dequeue, 0);
 
        /* We're going to drop the socket lock, so we need to lock the call
         * against interference by sendmsg.
@@ -492,36 +387,36 @@ try_again:
                msg->msg_namelen = len;
        }
 
-       switch (READ_ONCE(call->state)) {
-       case RXRPC_CALL_CLIENT_RECV_REPLY:
-       case RXRPC_CALL_SERVER_RECV_REQUEST:
-       case RXRPC_CALL_SERVER_ACK_REQUEST:
-               ret = rxrpc_recvmsg_data(sock, call, msg, &msg->msg_iter, len,
-                                        flags, &copied);
-               if (ret == -EAGAIN)
-                       ret = 0;
-
-               if (!skb_queue_empty(&call->recvmsg_queue))
-                       rxrpc_notify_socket(call);
-               break;
-       default:
+       ret = rxrpc_recvmsg_data(sock, call, msg, &msg->msg_iter, len,
+                                flags, &copied);
+       if (ret == -EAGAIN)
                ret = 0;
-               break;
-       }
-
+       if (ret == -EIO)
+               goto call_failed;
        if (ret < 0)
                goto error_unlock_call;
 
-       if (call->state == RXRPC_CALL_COMPLETE) {
-               ret = rxrpc_recvmsg_term(call, msg);
-               if (ret < 0)
-                       goto error_unlock_call;
-               if (!(flags & MSG_PEEK))
-                       rxrpc_release_call(rx, call);
-               msg->msg_flags |= MSG_EOR;
-               ret = 1;
-       }
+       if (rxrpc_call_is_complete(call) &&
+           skb_queue_empty(&call->recvmsg_queue))
+               goto call_complete;
+       if (rxrpc_call_has_failed(call))
+               goto call_failed;
 
+       rxrpc_notify_socket(call);
+       goto not_yet_complete;
+
+call_failed:
+       rxrpc_purge_queue(&call->recvmsg_queue);
+call_complete:
+       ret = rxrpc_recvmsg_term(call, msg);
+       if (ret < 0)
+               goto error_unlock_call;
+       if (!(flags & MSG_PEEK))
+               rxrpc_release_call(rx, call);
+       msg->msg_flags |= MSG_EOR;
+       ret = 1;
+
+not_yet_complete:
        if (ret == 0)
                msg->msg_flags |= MSG_MORE;
        else
@@ -531,7 +426,7 @@ try_again:
 error_unlock_call:
        mutex_unlock(&call->user_mutex);
        rxrpc_put_call(call, rxrpc_call_put_recvmsg);
-       trace_rxrpc_recvmsg(call, rxrpc_recvmsg_return, ret);
+       trace_rxrpc_recvmsg(call_debug_id, rxrpc_recvmsg_return, ret);
        return ret;
 
 error_requeue_call:
@@ -539,14 +434,14 @@ error_requeue_call:
                write_lock(&rx->recvmsg_lock);
                list_add(&call->recvmsg_link, &rx->recvmsg_q);
                write_unlock(&rx->recvmsg_lock);
-               trace_rxrpc_recvmsg(call, rxrpc_recvmsg_requeue, 0);
+               trace_rxrpc_recvmsg(call_debug_id, rxrpc_recvmsg_requeue, 0);
        } else {
                rxrpc_put_call(call, rxrpc_call_put_recvmsg);
        }
 error_no_call:
        release_sock(&rx->sk);
 error_trace:
-       trace_rxrpc_recvmsg(call, rxrpc_recvmsg_return, ret);
+       trace_rxrpc_recvmsg(call_debug_id, rxrpc_recvmsg_return, ret);
        return ret;
 
 wait_interrupted:
@@ -584,49 +479,34 @@ int rxrpc_kernel_recv_data(struct socket *sock, struct rxrpc_call *call,
        size_t offset = 0;
        int ret;
 
-       _enter("{%d,%s},%zu,%d",
-              call->debug_id, rxrpc_call_states[call->state],
-              *_len, want_more);
-
-       ASSERTCMP(call->state, !=, RXRPC_CALL_SERVER_SECURING);
+       _enter("{%d},%zu,%d", call->debug_id, *_len, want_more);
 
        mutex_lock(&call->user_mutex);
 
-       switch (READ_ONCE(call->state)) {
-       case RXRPC_CALL_CLIENT_RECV_REPLY:
-       case RXRPC_CALL_SERVER_RECV_REQUEST:
-       case RXRPC_CALL_SERVER_ACK_REQUEST:
-               ret = rxrpc_recvmsg_data(sock, call, NULL, iter,
-                                        *_len, 0, &offset);
-               *_len -= offset;
-               if (ret < 0)
-                       goto out;
-
-               /* We can only reach here with a partially full buffer if we
-                * have reached the end of the data.  We must otherwise have a
-                * full buffer or have been given -EAGAIN.
-                */
-               if (ret == 1) {
-                       if (iov_iter_count(iter) > 0)
-                               goto short_data;
-                       if (!want_more)
-                               goto read_phase_complete;
-                       ret = 0;
-                       goto out;
-               }
-
-               if (!want_more)
-                       goto excess_data;
+       ret = rxrpc_recvmsg_data(sock, call, NULL, iter, *_len, 0, &offset);
+       *_len -= offset;
+       if (ret == -EIO)
+               goto call_failed;
+       if (ret < 0)
                goto out;
 
-       case RXRPC_CALL_COMPLETE:
-               goto call_complete;
-
-       default:
-               ret = -EINPROGRESS;
+       /* We can only reach here with a partially full buffer if we have
+        * reached the end of the data.  We must otherwise have a full buffer
+        * or have been given -EAGAIN.
+        */
+       if (ret == 1) {
+               if (iov_iter_count(iter) > 0)
+                       goto short_data;
+               if (!want_more)
+                       goto read_phase_complete;
+               ret = 0;
                goto out;
        }
 
+       if (!want_more)
+               goto excess_data;
+       goto out;
+
 read_phase_complete:
        ret = 1;
 out:
@@ -637,14 +517,18 @@ out:
        return ret;
 
 short_data:
-       trace_rxrpc_rx_eproto(call, 0, tracepoint_string("short_data"));
+       trace_rxrpc_abort(call->debug_id, rxrpc_recvmsg_short_data,
+                         call->cid, call->call_id, call->rx_consumed,
+                         0, -EBADMSG);
        ret = -EBADMSG;
        goto out;
 excess_data:
-       trace_rxrpc_rx_eproto(call, 0, tracepoint_string("excess_data"));
+       trace_rxrpc_abort(call->debug_id, rxrpc_recvmsg_excess_data,
+                         call->cid, call->call_id, call->rx_consumed,
+                         0, -EMSGSIZE);
        ret = -EMSGSIZE;
        goto out;
-call_complete:
+call_failed:
        *_abort = call->abort_code;
        ret = call->error;
        if (call->completion == RXRPC_CALL_SUCCEEDED) {
index d1233720e05f2404c3ad24e3b2630f8b74335906..1bf571a66e020d263ceb1d5a4489253b8fbf9728 100644 (file)
@@ -411,18 +411,15 @@ static int rxkad_verify_packet_1(struct rxrpc_call *call, struct sk_buff *skb,
        struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
        struct rxrpc_crypt iv;
        struct scatterlist sg[16];
-       bool aborted;
        u32 data_size, buf;
        u16 check;
        int ret;
 
        _enter("");
 
-       if (sp->len < 8) {
-               aborted = rxrpc_abort_eproto(call, skb, "rxkad_1_hdr", "V1H",
-                                            RXKADSEALEDINCON);
-               goto protocol_error;
-       }
+       if (sp->len < 8)
+               return rxrpc_abort_eproto(call, skb, RXKADSEALEDINCON,
+                                         rxkad_abort_1_short_header);
 
        /* Decrypt the skbuff in-place.  TODO: We really want to decrypt
         * directly into the target buffer.
@@ -442,11 +439,9 @@ static int rxkad_verify_packet_1(struct rxrpc_call *call, struct sk_buff *skb,
        skcipher_request_zero(req);
 
        /* Extract the decrypted packet length */
-       if (skb_copy_bits(skb, sp->offset, &sechdr, sizeof(sechdr)) < 0) {
-               aborted = rxrpc_abort_eproto(call, skb, "rxkad_1_len", "XV1",
-                                            RXKADDATALEN);
-               goto protocol_error;
-       }
+       if (skb_copy_bits(skb, sp->offset, &sechdr, sizeof(sechdr)) < 0)
+               return rxrpc_abort_eproto(call, skb, RXKADDATALEN,
+                                         rxkad_abort_1_short_encdata);
        sp->offset += sizeof(sechdr);
        sp->len    -= sizeof(sechdr);
 
@@ -456,26 +451,16 @@ static int rxkad_verify_packet_1(struct rxrpc_call *call, struct sk_buff *skb,
        check = buf >> 16;
        check ^= seq ^ call->call_id;
        check &= 0xffff;
-       if (check != 0) {
-               aborted = rxrpc_abort_eproto(call, skb, "rxkad_1_check", "V1C",
-                                            RXKADSEALEDINCON);
-               goto protocol_error;
-       }
-
-       if (data_size > sp->len) {
-               aborted = rxrpc_abort_eproto(call, skb, "rxkad_1_datalen", "V1L",
-                                            RXKADDATALEN);
-               goto protocol_error;
-       }
+       if (check != 0)
+               return rxrpc_abort_eproto(call, skb, RXKADSEALEDINCON,
+                                         rxkad_abort_1_short_check);
+       if (data_size > sp->len)
+               return rxrpc_abort_eproto(call, skb, RXKADDATALEN,
+                                         rxkad_abort_1_short_data);
        sp->len = data_size;
 
        _leave(" = 0 [dlen=%x]", data_size);
        return 0;
-
-protocol_error:
-       if (aborted)
-               rxrpc_send_abort_packet(call);
-       return -EPROTO;
 }
 
 /*
@@ -490,18 +475,15 @@ static int rxkad_verify_packet_2(struct rxrpc_call *call, struct sk_buff *skb,
        struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
        struct rxrpc_crypt iv;
        struct scatterlist _sg[4], *sg;
-       bool aborted;
        u32 data_size, buf;
        u16 check;
        int nsg, ret;
 
        _enter(",{%d}", sp->len);
 
-       if (sp->len < 8) {
-               aborted = rxrpc_abort_eproto(call, skb, "rxkad_2_hdr", "V2H",
-                                            RXKADSEALEDINCON);
-               goto protocol_error;
-       }
+       if (sp->len < 8)
+               return rxrpc_abort_eproto(call, skb, RXKADSEALEDINCON,
+                                         rxkad_abort_2_short_header);
 
        /* Decrypt the skbuff in-place.  TODO: We really want to decrypt
         * directly into the target buffer.
@@ -513,7 +495,7 @@ static int rxkad_verify_packet_2(struct rxrpc_call *call, struct sk_buff *skb,
        } else {
                sg = kmalloc_array(nsg, sizeof(*sg), GFP_NOIO);
                if (!sg)
-                       goto nomem;
+                       return -ENOMEM;
        }
 
        sg_init_table(sg, nsg);
@@ -537,11 +519,9 @@ static int rxkad_verify_packet_2(struct rxrpc_call *call, struct sk_buff *skb,
                kfree(sg);
 
        /* Extract the decrypted packet length */
-       if (skb_copy_bits(skb, sp->offset, &sechdr, sizeof(sechdr)) < 0) {
-               aborted = rxrpc_abort_eproto(call, skb, "rxkad_2_len", "XV2",
-                                            RXKADDATALEN);
-               goto protocol_error;
-       }
+       if (skb_copy_bits(skb, sp->offset, &sechdr, sizeof(sechdr)) < 0)
+               return rxrpc_abort_eproto(call, skb, RXKADDATALEN,
+                                         rxkad_abort_2_short_len);
        sp->offset += sizeof(sechdr);
        sp->len    -= sizeof(sechdr);
 
@@ -551,30 +531,17 @@ static int rxkad_verify_packet_2(struct rxrpc_call *call, struct sk_buff *skb,
        check = buf >> 16;
        check ^= seq ^ call->call_id;
        check &= 0xffff;
-       if (check != 0) {
-               aborted = rxrpc_abort_eproto(call, skb, "rxkad_2_check", "V2C",
-                                            RXKADSEALEDINCON);
-               goto protocol_error;
-       }
+       if (check != 0)
+               return rxrpc_abort_eproto(call, skb, RXKADSEALEDINCON,
+                                         rxkad_abort_2_short_check);
 
-       if (data_size > sp->len) {
-               aborted = rxrpc_abort_eproto(call, skb, "rxkad_2_datalen", "V2L",
-                                            RXKADDATALEN);
-               goto protocol_error;
-       }
+       if (data_size > sp->len)
+               return rxrpc_abort_eproto(call, skb, RXKADDATALEN,
+                                         rxkad_abort_2_short_data);
 
        sp->len = data_size;
        _leave(" = 0 [dlen=%x]", data_size);
        return 0;
-
-protocol_error:
-       if (aborted)
-               rxrpc_send_abort_packet(call);
-       return -EPROTO;
-
-nomem:
-       _leave(" = -ENOMEM");
-       return -ENOMEM;
 }
 
 /*
@@ -590,7 +557,6 @@ static int rxkad_verify_packet(struct rxrpc_call *call, struct sk_buff *skb)
                __be32 buf[2];
        } crypto __aligned(8);
        rxrpc_seq_t seq = sp->hdr.seq;
-       bool aborted;
        int ret;
        u16 cksum;
        u32 x, y;
@@ -627,9 +593,9 @@ static int rxkad_verify_packet(struct rxrpc_call *call, struct sk_buff *skb)
                cksum = 1; /* zero checksums are not permitted */
 
        if (cksum != sp->hdr.cksum) {
-               aborted = rxrpc_abort_eproto(call, skb, "rxkad_csum", "VCK",
-                                            RXKADSEALEDINCON);
-               goto protocol_error;
+               ret = rxrpc_abort_eproto(call, skb, RXKADSEALEDINCON,
+                                        rxkad_abort_bad_checksum);
+               goto out;
        }
 
        switch (call->conn->security_level) {
@@ -647,13 +613,9 @@ static int rxkad_verify_packet(struct rxrpc_call *call, struct sk_buff *skb)
                break;
        }
 
+out:
        skcipher_request_free(req);
        return ret;
-
-protocol_error:
-       if (aborted)
-               rxrpc_send_abort_packet(call);
-       return -EPROTO;
 }
 
 /*
@@ -821,34 +783,30 @@ static int rxkad_encrypt_response(struct rxrpc_connection *conn,
  * respond to a challenge packet
  */
 static int rxkad_respond_to_challenge(struct rxrpc_connection *conn,
-                                     struct sk_buff *skb,
-                                     u32 *_abort_code)
+                                     struct sk_buff *skb)
 {
        const struct rxrpc_key_token *token;
        struct rxkad_challenge challenge;
        struct rxkad_response *resp;
        struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
-       const char *eproto;
-       u32 version, nonce, min_level, abort_code;
-       int ret;
+       u32 version, nonce, min_level;
+       int ret = -EPROTO;
 
        _enter("{%d,%x}", conn->debug_id, key_serial(conn->key));
 
-       eproto = tracepoint_string("chall_no_key");
-       abort_code = RX_PROTOCOL_ERROR;
        if (!conn->key)
-               goto protocol_error;
+               return rxrpc_abort_conn(conn, skb, RX_PROTOCOL_ERROR, -EPROTO,
+                                       rxkad_abort_chall_no_key);
 
-       abort_code = RXKADEXPIRED;
        ret = key_validate(conn->key);
        if (ret < 0)
-               goto other_error;
+               return rxrpc_abort_conn(conn, skb, RXKADEXPIRED, ret,
+                                       rxkad_abort_chall_key_expired);
 
-       eproto = tracepoint_string("chall_short");
-       abort_code = RXKADPACKETSHORT;
        if (skb_copy_bits(skb, sizeof(struct rxrpc_wire_header),
                          &challenge, sizeof(challenge)) < 0)
-               goto protocol_error;
+               return rxrpc_abort_conn(conn, skb, RXKADPACKETSHORT, -EPROTO,
+                                       rxkad_abort_chall_short);
 
        version = ntohl(challenge.version);
        nonce = ntohl(challenge.nonce);
@@ -856,15 +814,13 @@ static int rxkad_respond_to_challenge(struct rxrpc_connection *conn,
 
        trace_rxrpc_rx_challenge(conn, sp->hdr.serial, version, nonce, min_level);
 
-       eproto = tracepoint_string("chall_ver");
-       abort_code = RXKADINCONSISTENCY;
        if (version != RXKAD_VERSION)
-               goto protocol_error;
+               return rxrpc_abort_conn(conn, skb, RXKADINCONSISTENCY, -EPROTO,
+                                       rxkad_abort_chall_version);
 
-       abort_code = RXKADLEVELFAIL;
-       ret = -EACCES;
        if (conn->security_level < min_level)
-               goto other_error;
+               return rxrpc_abort_conn(conn, skb, RXKADLEVELFAIL, -EACCES,
+                                       rxkad_abort_chall_level);
 
        token = conn->key->payload.data[0];
 
@@ -893,13 +849,6 @@ static int rxkad_respond_to_challenge(struct rxrpc_connection *conn,
                ret = rxkad_send_response(conn, &sp->hdr, resp, token->kad);
        kfree(resp);
        return ret;
-
-protocol_error:
-       trace_rxrpc_rx_eproto(NULL, sp->hdr.serial, eproto);
-       ret = -EPROTO;
-other_error:
-       *_abort_code = abort_code;
-       return ret;
 }
 
 /*
@@ -910,20 +859,15 @@ static int rxkad_decrypt_ticket(struct rxrpc_connection *conn,
                                struct sk_buff *skb,
                                void *ticket, size_t ticket_len,
                                struct rxrpc_crypt *_session_key,
-                               time64_t *_expiry,
-                               u32 *_abort_code)
+                               time64_t *_expiry)
 {
        struct skcipher_request *req;
-       struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
        struct rxrpc_crypt iv, key;
        struct scatterlist sg[1];
        struct in_addr addr;
        unsigned int life;
-       const char *eproto;
        time64_t issue, now;
        bool little_endian;
-       int ret;
-       u32 abort_code;
        u8 *p, *q, *name, *end;
 
        _enter("{%d},{%x}", conn->debug_id, key_serial(server_key));
@@ -935,10 +879,9 @@ static int rxkad_decrypt_ticket(struct rxrpc_connection *conn,
 
        memcpy(&iv, &server_key->payload.data[2], sizeof(iv));
 
-       ret = -ENOMEM;
        req = skcipher_request_alloc(server_key->payload.data[0], GFP_NOFS);
        if (!req)
-               goto temporary_error;
+               return -ENOMEM;
 
        sg_init_one(&sg[0], ticket, ticket_len);
        skcipher_request_set_callback(req, 0, NULL, NULL);
@@ -949,18 +892,21 @@ static int rxkad_decrypt_ticket(struct rxrpc_connection *conn,
        p = ticket;
        end = p + ticket_len;
 
-#define Z(field)                                       \
-       ({                                              \
-               u8 *__str = p;                          \
-               eproto = tracepoint_string("rxkad_bad_"#field); \
-               q = memchr(p, 0, end - p);              \
-               if (!q || q - p > (field##_SZ))         \
-                       goto bad_ticket;                \
-               for (; p < q; p++)                      \
-                       if (!isprint(*p))               \
-                               goto bad_ticket;        \
-               p++;                                    \
-               __str;                                  \
+#define Z(field, fieldl)                                               \
+       ({                                                              \
+               u8 *__str = p;                                          \
+               q = memchr(p, 0, end - p);                              \
+               if (!q || q - p > field##_SZ)                           \
+                       return rxrpc_abort_conn(                        \
+                               conn, skb, RXKADBADTICKET, -EPROTO,     \
+                               rxkad_abort_resp_tkt_##fieldl);         \
+               for (; p < q; p++)                                      \
+                       if (!isprint(*p))                               \
+                               return rxrpc_abort_conn(                \
+                                       conn, skb, RXKADBADTICKET, -EPROTO, \
+                                       rxkad_abort_resp_tkt_##fieldl); \
+               p++;                                                    \
+               __str;                                                  \
        })
 
        /* extract the ticket flags */
@@ -969,20 +915,20 @@ static int rxkad_decrypt_ticket(struct rxrpc_connection *conn,
        p++;
 
        /* extract the authentication name */
-       name = Z(ANAME);
+       name = Z(ANAME, aname);
        _debug("KIV ANAME: %s", name);
 
        /* extract the principal's instance */
-       name = Z(INST);
+       name = Z(INST, inst);
        _debug("KIV INST : %s", name);
 
        /* extract the principal's authentication domain */
-       name = Z(REALM);
+       name = Z(REALM, realm);
        _debug("KIV REALM: %s", name);
 
-       eproto = tracepoint_string("rxkad_bad_len");
        if (end - p < 4 + 8 + 4 + 2)
-               goto bad_ticket;
+               return rxrpc_abort_conn(conn, skb, RXKADBADTICKET, -EPROTO,
+                                       rxkad_abort_resp_tkt_short);
 
        /* get the IPv4 address of the entity that requested the ticket */
        memcpy(&addr, p, sizeof(addr));
@@ -1014,38 +960,23 @@ static int rxkad_decrypt_ticket(struct rxrpc_connection *conn,
        _debug("KIV ISSUE: %llx [%llx]", issue, now);
 
        /* check the ticket is in date */
-       if (issue > now) {
-               abort_code = RXKADNOAUTH;
-               ret = -EKEYREJECTED;
-               goto other_error;
-       }
-
-       if (issue < now - life) {
-               abort_code = RXKADEXPIRED;
-               ret = -EKEYEXPIRED;
-               goto other_error;
-       }
+       if (issue > now)
+               return rxrpc_abort_conn(conn, skb, RXKADNOAUTH, -EKEYREJECTED,
+                                       rxkad_abort_resp_tkt_future);
+       if (issue < now - life)
+               return rxrpc_abort_conn(conn, skb, RXKADEXPIRED, -EKEYEXPIRED,
+                                       rxkad_abort_resp_tkt_expired);
 
        *_expiry = issue + life;
 
        /* get the service name */
-       name = Z(SNAME);
+       name = Z(SNAME, sname);
        _debug("KIV SNAME: %s", name);
 
        /* get the service instance name */
-       name = Z(INST);
+       name = Z(INST, sinst);
        _debug("KIV SINST: %s", name);
        return 0;
-
-bad_ticket:
-       trace_rxrpc_rx_eproto(NULL, sp->hdr.serial, eproto);
-       abort_code = RXKADBADTICKET;
-       ret = -EPROTO;
-other_error:
-       *_abort_code = abort_code;
-       return ret;
-temporary_error:
-       return ret;
 }
 
 /*
@@ -1086,17 +1017,15 @@ static void rxkad_decrypt_response(struct rxrpc_connection *conn,
  * verify a response
  */
 static int rxkad_verify_response(struct rxrpc_connection *conn,
-                                struct sk_buff *skb,
-                                u32 *_abort_code)
+                                struct sk_buff *skb)
 {
        struct rxkad_response *response;
        struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
        struct rxrpc_crypt session_key;
        struct key *server_key;
-       const char *eproto;
        time64_t expiry;
        void *ticket;
-       u32 abort_code, version, kvno, ticket_len, level;
+       u32 version, kvno, ticket_len, level;
        __be32 csum;
        int ret, i;
 
@@ -1104,22 +1033,18 @@ static int rxkad_verify_response(struct rxrpc_connection *conn,
 
        server_key = rxrpc_look_up_server_security(conn, skb, 0, 0);
        if (IS_ERR(server_key)) {
-               switch (PTR_ERR(server_key)) {
+               ret = PTR_ERR(server_key);
+               switch (ret) {
                case -ENOKEY:
-                       abort_code = RXKADUNKNOWNKEY;
-                       break;
+                       return rxrpc_abort_conn(conn, skb, RXKADUNKNOWNKEY, ret,
+                                               rxkad_abort_resp_nokey);
                case -EKEYEXPIRED:
-                       abort_code = RXKADEXPIRED;
-                       break;
+                       return rxrpc_abort_conn(conn, skb, RXKADEXPIRED, ret,
+                                               rxkad_abort_resp_key_expired);
                default:
-                       abort_code = RXKADNOAUTH;
-                       break;
+                       return rxrpc_abort_conn(conn, skb, RXKADNOAUTH, ret,
+                                               rxkad_abort_resp_key_rejected);
                }
-               trace_rxrpc_abort(0, "SVK",
-                                 sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
-                                 abort_code, PTR_ERR(server_key));
-               *_abort_code = abort_code;
-               return -EPROTO;
        }
 
        ret = -ENOMEM;
@@ -1127,11 +1052,12 @@ static int rxkad_verify_response(struct rxrpc_connection *conn,
        if (!response)
                goto temporary_error;
 
-       eproto = tracepoint_string("rxkad_rsp_short");
-       abort_code = RXKADPACKETSHORT;
        if (skb_copy_bits(skb, sizeof(struct rxrpc_wire_header),
-                         response, sizeof(*response)) < 0)
+                         response, sizeof(*response)) < 0) {
+               rxrpc_abort_conn(conn, skb, RXKADPACKETSHORT, -EPROTO,
+                                rxkad_abort_resp_short);
                goto protocol_error;
+       }
 
        version = ntohl(response->version);
        ticket_len = ntohl(response->ticket_len);
@@ -1139,20 +1065,23 @@ static int rxkad_verify_response(struct rxrpc_connection *conn,
 
        trace_rxrpc_rx_response(conn, sp->hdr.serial, version, kvno, ticket_len);
 
-       eproto = tracepoint_string("rxkad_rsp_ver");
-       abort_code = RXKADINCONSISTENCY;
-       if (version != RXKAD_VERSION)
+       if (version != RXKAD_VERSION) {
+               rxrpc_abort_conn(conn, skb, RXKADINCONSISTENCY, -EPROTO,
+                                rxkad_abort_resp_version);
                goto protocol_error;
+       }
 
-       eproto = tracepoint_string("rxkad_rsp_tktlen");
-       abort_code = RXKADTICKETLEN;
-       if (ticket_len < 4 || ticket_len > MAXKRB5TICKETLEN)
+       if (ticket_len < 4 || ticket_len > MAXKRB5TICKETLEN) {
+               rxrpc_abort_conn(conn, skb, RXKADTICKETLEN, -EPROTO,
+                                rxkad_abort_resp_tkt_len);
                goto protocol_error;
+       }
 
-       eproto = tracepoint_string("rxkad_rsp_unkkey");
-       abort_code = RXKADUNKNOWNKEY;
-       if (kvno >= RXKAD_TKT_TYPE_KERBEROS_V5)
+       if (kvno >= RXKAD_TKT_TYPE_KERBEROS_V5) {
+               rxrpc_abort_conn(conn, skb, RXKADUNKNOWNKEY, -EPROTO,
+                                rxkad_abort_resp_unknown_tkt);
                goto protocol_error;
+       }
 
        /* extract the kerberos ticket and decrypt and decode it */
        ret = -ENOMEM;
@@ -1160,15 +1089,15 @@ static int rxkad_verify_response(struct rxrpc_connection *conn,
        if (!ticket)
                goto temporary_error_free_resp;
 
-       eproto = tracepoint_string("rxkad_tkt_short");
-       abort_code = RXKADPACKETSHORT;
-       ret = skb_copy_bits(skb, sizeof(struct rxrpc_wire_header) + sizeof(*response),
-                           ticket, ticket_len);
-       if (ret < 0)
-               goto temporary_error_free_ticket;
+       if (skb_copy_bits(skb, sizeof(struct rxrpc_wire_header) + sizeof(*response),
+                         ticket, ticket_len) < 0) {
+               rxrpc_abort_conn(conn, skb, RXKADPACKETSHORT, -EPROTO,
+                                rxkad_abort_resp_short_tkt);
+               goto protocol_error;
+       }
 
        ret = rxkad_decrypt_ticket(conn, server_key, skb, ticket, ticket_len,
-                                  &session_key, &expiry, _abort_code);
+                                  &session_key, &expiry);
        if (ret < 0)
                goto temporary_error_free_ticket;
 
@@ -1176,56 +1105,61 @@ static int rxkad_verify_response(struct rxrpc_connection *conn,
         * response */
        rxkad_decrypt_response(conn, response, &session_key);
 
-       eproto = tracepoint_string("rxkad_rsp_param");
-       abort_code = RXKADSEALEDINCON;
-       if (ntohl(response->encrypted.epoch) != conn->proto.epoch)
-               goto protocol_error_free;
-       if (ntohl(response->encrypted.cid) != conn->proto.cid)
-               goto protocol_error_free;
-       if (ntohl(response->encrypted.securityIndex) != conn->security_ix)
+       if (ntohl(response->encrypted.epoch) != conn->proto.epoch ||
+           ntohl(response->encrypted.cid) != conn->proto.cid ||
+           ntohl(response->encrypted.securityIndex) != conn->security_ix) {
+               rxrpc_abort_conn(conn, skb, RXKADSEALEDINCON, -EPROTO,
+                                rxkad_abort_resp_bad_param);
                goto protocol_error_free;
+       }
+
        csum = response->encrypted.checksum;
        response->encrypted.checksum = 0;
        rxkad_calc_response_checksum(response);
-       eproto = tracepoint_string("rxkad_rsp_csum");
-       if (response->encrypted.checksum != csum)
+       if (response->encrypted.checksum != csum) {
+               rxrpc_abort_conn(conn, skb, RXKADSEALEDINCON, -EPROTO,
+                                rxkad_abort_resp_bad_checksum);
                goto protocol_error_free;
+       }
 
-       spin_lock(&conn->bundle->channel_lock);
        for (i = 0; i < RXRPC_MAXCALLS; i++) {
-               struct rxrpc_call *call;
                u32 call_id = ntohl(response->encrypted.call_id[i]);
+               u32 counter = READ_ONCE(conn->channels[i].call_counter);
+
+               if (call_id > INT_MAX) {
+                       rxrpc_abort_conn(conn, skb, RXKADSEALEDINCON, -EPROTO,
+                                        rxkad_abort_resp_bad_callid);
+                       goto protocol_error_free;
+               }
 
-               eproto = tracepoint_string("rxkad_rsp_callid");
-               if (call_id > INT_MAX)
-                       goto protocol_error_unlock;
-
-               eproto = tracepoint_string("rxkad_rsp_callctr");
-               if (call_id < conn->channels[i].call_counter)
-                       goto protocol_error_unlock;
-
-               eproto = tracepoint_string("rxkad_rsp_callst");
-               if (call_id > conn->channels[i].call_counter) {
-                       call = rcu_dereference_protected(
-                               conn->channels[i].call,
-                               lockdep_is_held(&conn->bundle->channel_lock));
-                       if (call && call->state < RXRPC_CALL_COMPLETE)
-                               goto protocol_error_unlock;
+               if (call_id < counter) {
+                       rxrpc_abort_conn(conn, skb, RXKADSEALEDINCON, -EPROTO,
+                                        rxkad_abort_resp_call_ctr);
+                       goto protocol_error_free;
+               }
+
+               if (call_id > counter) {
+                       if (conn->channels[i].call) {
+                               rxrpc_abort_conn(conn, skb, RXKADSEALEDINCON, -EPROTO,
+                                                rxkad_abort_resp_call_state);
+                               goto protocol_error_free;
+                       }
                        conn->channels[i].call_counter = call_id;
                }
        }
-       spin_unlock(&conn->bundle->channel_lock);
 
-       eproto = tracepoint_string("rxkad_rsp_seq");
-       abort_code = RXKADOUTOFSEQUENCE;
-       if (ntohl(response->encrypted.inc_nonce) != conn->rxkad.nonce + 1)
+       if (ntohl(response->encrypted.inc_nonce) != conn->rxkad.nonce + 1) {
+               rxrpc_abort_conn(conn, skb, RXKADOUTOFSEQUENCE, -EPROTO,
+                                rxkad_abort_resp_ooseq);
                goto protocol_error_free;
+       }
 
-       eproto = tracepoint_string("rxkad_rsp_level");
-       abort_code = RXKADLEVELFAIL;
        level = ntohl(response->encrypted.level);
-       if (level > RXRPC_SECURITY_ENCRYPT)
+       if (level > RXRPC_SECURITY_ENCRYPT) {
+               rxrpc_abort_conn(conn, skb, RXKADLEVELFAIL, -EPROTO,
+                                rxkad_abort_resp_level);
                goto protocol_error_free;
+       }
        conn->security_level = level;
 
        /* create a key to hold the security data and expiration time - after
@@ -1240,15 +1174,11 @@ static int rxkad_verify_response(struct rxrpc_connection *conn,
        _leave(" = 0");
        return 0;
 
-protocol_error_unlock:
-       spin_unlock(&conn->bundle->channel_lock);
 protocol_error_free:
        kfree(ticket);
 protocol_error:
        kfree(response);
-       trace_rxrpc_rx_eproto(NULL, sp->hdr.serial, eproto);
        key_put(server_key);
-       *_abort_code = abort_code;
        return -EPROTO;
 
 temporary_error_free_ticket:
index d33a109e846c1bdfc9072c87ba0543ef3293272e..16dcabb71ebe164c85aaf2d7c1b173a55b5da9c2 100644 (file)
@@ -10,6 +10,8 @@
 #include <linux/slab.h>
 #include <net/sock.h>
 #include <net/af_rxrpc.h>
+#define RXRPC_TRACE_ONLY_DEFINE_ENUMS
+#include <trace/events/rxrpc.h>
 
 MODULE_DESCRIPTION("rxperf test server (afs)");
 MODULE_AUTHOR("Red Hat, Inc.");
@@ -307,12 +309,14 @@ static void rxperf_deliver_to_call(struct work_struct *work)
                case -EOPNOTSUPP:
                        abort_code = RXGEN_OPCODE;
                        rxrpc_kernel_abort_call(rxperf_socket, call->rxcall,
-                                               abort_code, ret, "GOP");
+                                               abort_code, ret,
+                                               rxperf_abort_op_not_supported);
                        goto call_complete;
                case -ENOTSUPP:
                        abort_code = RX_USER_ABORT;
                        rxrpc_kernel_abort_call(rxperf_socket, call->rxcall,
-                                               abort_code, ret, "GUA");
+                                               abort_code, ret,
+                                               rxperf_abort_op_not_supported);
                        goto call_complete;
                case -EIO:
                        pr_err("Call %u in bad state %u\n",
@@ -324,11 +328,13 @@ static void rxperf_deliver_to_call(struct work_struct *work)
                case -ENOMEM:
                case -EFAULT:
                        rxrpc_kernel_abort_call(rxperf_socket, call->rxcall,
-                                               RXGEN_SS_UNMARSHAL, ret, "GUM");
+                                               RXGEN_SS_UNMARSHAL, ret,
+                                               rxperf_abort_unmarshal_error);
                        goto call_complete;
                default:
                        rxrpc_kernel_abort_call(rxperf_socket, call->rxcall,
-                                               RX_CALL_DEAD, ret, "GER");
+                                               RX_CALL_DEAD, ret,
+                                               rxperf_abort_general_error);
                        goto call_complete;
                }
        }
@@ -523,7 +529,8 @@ static int rxperf_process_call(struct rxperf_call *call)
 
        if (n == -ENOMEM)
                rxrpc_kernel_abort_call(rxperf_socket, call->rxcall,
-                                       RXGEN_SS_MARSHAL, -ENOMEM, "GOM");
+                                       RXGEN_SS_MARSHAL, -ENOMEM,
+                                       rxperf_abort_oom);
        return n;
 }
 
index ab968f65a4900d87869c3d755d8806ec3310a4cc..cb8dd1d3b1d49ed9dc26bdfa301bc263174cd876 100644 (file)
@@ -97,38 +97,31 @@ found:
  */
 int rxrpc_init_client_conn_security(struct rxrpc_connection *conn)
 {
-       const struct rxrpc_security *sec;
        struct rxrpc_key_token *token;
        struct key *key = conn->key;
-       int ret;
+       int ret = 0;
 
        _enter("{%d},{%x}", conn->debug_id, key_serial(key));
 
-       if (!key)
-               return 0;
-
-       ret = key_validate(key);
-       if (ret < 0)
-               return ret;
-
        for (token = key->payload.data[0]; token; token = token->next) {
-               sec = rxrpc_security_lookup(token->security_index);
-               if (sec)
+               if (token->security_index == conn->security->security_index)
                        goto found;
        }
        return -EKEYREJECTED;
 
 found:
-       conn->security = sec;
-
-       ret = conn->security->init_connection_security(conn, token);
-       if (ret < 0) {
-               conn->security = &rxrpc_no_security;
-               return ret;
+       mutex_lock(&conn->security_lock);
+       if (conn->state == RXRPC_CONN_CLIENT_UNSECURED) {
+               ret = conn->security->init_connection_security(conn, token);
+               if (ret == 0) {
+                       spin_lock(&conn->state_lock);
+                       if (conn->state == RXRPC_CONN_CLIENT_UNSECURED)
+                               conn->state = RXRPC_CONN_CLIENT;
+                       spin_unlock(&conn->state_lock);
+               }
        }
-
-       _leave(" = 0");
-       return 0;
+       mutex_unlock(&conn->security_lock);
+       return ret;
 }
 
 /*
@@ -144,21 +137,15 @@ const struct rxrpc_security *rxrpc_get_incoming_security(struct rxrpc_sock *rx,
 
        sec = rxrpc_security_lookup(sp->hdr.securityIndex);
        if (!sec) {
-               trace_rxrpc_abort(0, "SVS",
-                                 sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
-                                 RX_INVALID_OPERATION, EKEYREJECTED);
-               skb->mark = RXRPC_SKB_MARK_REJECT_ABORT;
-               skb->priority = RX_INVALID_OPERATION;
+               rxrpc_direct_abort(skb, rxrpc_abort_unsupported_security,
+                                  RX_INVALID_OPERATION, -EKEYREJECTED);
                return NULL;
        }
 
        if (sp->hdr.securityIndex != RXRPC_SECURITY_NONE &&
            !rx->securities) {
-               trace_rxrpc_abort(0, "SVR",
-                                 sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
-                                 RX_INVALID_OPERATION, EKEYREJECTED);
-               skb->mark = RXRPC_SKB_MARK_REJECT_ABORT;
-               skb->priority = sec->no_key_abort;
+               rxrpc_direct_abort(skb, rxrpc_abort_no_service_key,
+                                  sec->no_key_abort, -EKEYREJECTED);
                return NULL;
        }
 
@@ -191,9 +178,9 @@ struct key *rxrpc_look_up_server_security(struct rxrpc_connection *conn,
                sprintf(kdesc, "%u:%u",
                        sp->hdr.serviceId, sp->hdr.securityIndex);
 
-       rcu_read_lock();
+       read_lock(&conn->local->services_lock);
 
-       rx = rcu_dereference(conn->local->service);
+       rx = conn->local->service;
        if (!rx)
                goto out;
 
@@ -215,6 +202,6 @@ struct key *rxrpc_look_up_server_security(struct rxrpc_connection *conn,
        }
 
 out:
-       rcu_read_unlock();
+       read_unlock(&conn->local->services_lock);
        return key;
 }
index cde1e65f16b4548c218b653aa306aa29a16ca13f..da49fcf1c45674ce8299aa7bd927679a31bf2eb1 100644 (file)
 #include <net/af_rxrpc.h>
 #include "ar-internal.h"
 
+/*
+ * Propose an abort to be made in the I/O thread.
+ */
+bool rxrpc_propose_abort(struct rxrpc_call *call, s32 abort_code, int error,
+                        enum rxrpc_abort_reason why)
+{
+       _enter("{%d},%d,%d,%u", call->debug_id, abort_code, error, why);
+
+       if (!call->send_abort && !rxrpc_call_is_complete(call)) {
+               call->send_abort_why = why;
+               call->send_abort_err = error;
+               call->send_abort_seq = 0;
+               /* Request abort locklessly vs rxrpc_input_call_event(). */
+               smp_store_release(&call->send_abort, abort_code);
+               rxrpc_poke_call(call, rxrpc_call_poke_abort);
+               return true;
+       }
+
+       return false;
+}
+
+/*
+ * Wait for a call to become connected.  Interruption here doesn't cause the
+ * call to be aborted.
+ */
+static int rxrpc_wait_to_be_connected(struct rxrpc_call *call, long *timeo)
+{
+       DECLARE_WAITQUEUE(myself, current);
+       int ret = 0;
+
+       _enter("%d", call->debug_id);
+
+       if (rxrpc_call_state(call) != RXRPC_CALL_CLIENT_AWAIT_CONN)
+               return call->error;
+
+       add_wait_queue_exclusive(&call->waitq, &myself);
+
+       for (;;) {
+               ret = call->error;
+               if (ret < 0)
+                       break;
+
+               switch (call->interruptibility) {
+               case RXRPC_INTERRUPTIBLE:
+               case RXRPC_PREINTERRUPTIBLE:
+                       set_current_state(TASK_INTERRUPTIBLE);
+                       break;
+               case RXRPC_UNINTERRUPTIBLE:
+               default:
+                       set_current_state(TASK_UNINTERRUPTIBLE);
+                       break;
+               }
+               if (rxrpc_call_state(call) != RXRPC_CALL_CLIENT_AWAIT_CONN) {
+                       ret = call->error;
+                       break;
+               }
+               if ((call->interruptibility == RXRPC_INTERRUPTIBLE ||
+                    call->interruptibility == RXRPC_PREINTERRUPTIBLE) &&
+                   signal_pending(current)) {
+                       ret = sock_intr_errno(*timeo);
+                       break;
+               }
+               *timeo = schedule_timeout(*timeo);
+       }
+
+       remove_wait_queue(&call->waitq, &myself);
+       __set_current_state(TASK_RUNNING);
+
+       if (ret == 0 && rxrpc_call_is_complete(call))
+               ret = call->error;
+
+       _leave(" = %d", ret);
+       return ret;
+}
+
 /*
  * Return true if there's sufficient Tx queue space.
  */
@@ -39,7 +114,7 @@ static int rxrpc_wait_for_tx_window_intr(struct rxrpc_sock *rx,
                if (rxrpc_check_tx_space(call, NULL))
                        return 0;
 
-               if (call->state >= RXRPC_CALL_COMPLETE)
+               if (rxrpc_call_is_complete(call))
                        return call->error;
 
                if (signal_pending(current))
@@ -74,7 +149,7 @@ static int rxrpc_wait_for_tx_window_waitall(struct rxrpc_sock *rx,
                if (rxrpc_check_tx_space(call, &tx_win))
                        return 0;
 
-               if (call->state >= RXRPC_CALL_COMPLETE)
+               if (rxrpc_call_is_complete(call))
                        return call->error;
 
                if (timeout == 0 &&
@@ -103,7 +178,7 @@ static int rxrpc_wait_for_tx_window_nonintr(struct rxrpc_sock *rx,
                if (rxrpc_check_tx_space(call, NULL))
                        return 0;
 
-               if (call->state >= RXRPC_CALL_COMPLETE)
+               if (rxrpc_call_is_complete(call))
                        return call->error;
 
                trace_rxrpc_txqueue(call, rxrpc_txqueue_wait);
@@ -168,7 +243,6 @@ static void rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call,
                               struct rxrpc_txbuf *txb,
                               rxrpc_notify_end_tx_t notify_end_tx)
 {
-       unsigned long now;
        rxrpc_seq_t seq = txb->seq;
        bool last = test_bit(RXRPC_TXBUF_LAST, &txb->flags), poke;
 
@@ -191,36 +265,10 @@ static void rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call,
        poke = list_empty(&call->tx_sendmsg);
        list_add_tail(&txb->call_link, &call->tx_sendmsg);
        call->tx_prepared = seq;
+       if (last)
+               rxrpc_notify_end_tx(rx, call, notify_end_tx);
        spin_unlock(&call->tx_lock);
 
-       if (last || call->state == RXRPC_CALL_SERVER_ACK_REQUEST) {
-               _debug("________awaiting reply/ACK__________");
-               write_lock(&call->state_lock);
-               switch (call->state) {
-               case RXRPC_CALL_CLIENT_SEND_REQUEST:
-                       call->state = RXRPC_CALL_CLIENT_AWAIT_REPLY;
-                       rxrpc_notify_end_tx(rx, call, notify_end_tx);
-                       break;
-               case RXRPC_CALL_SERVER_ACK_REQUEST:
-                       call->state = RXRPC_CALL_SERVER_SEND_REPLY;
-                       now = jiffies;
-                       WRITE_ONCE(call->delay_ack_at, now + MAX_JIFFY_OFFSET);
-                       if (call->ackr_reason == RXRPC_ACK_DELAY)
-                               call->ackr_reason = 0;
-                       trace_rxrpc_timer(call, rxrpc_timer_init_for_send_reply, now);
-                       if (!last)
-                               break;
-                       fallthrough;
-               case RXRPC_CALL_SERVER_SEND_REPLY:
-                       call->state = RXRPC_CALL_SERVER_AWAIT_ACK;
-                       rxrpc_notify_end_tx(rx, call, notify_end_tx);
-                       break;
-               default:
-                       break;
-               }
-               write_unlock(&call->state_lock);
-       }
-
        if (poke)
                rxrpc_poke_call(call, rxrpc_call_poke_start);
 }
@@ -245,6 +293,16 @@ static int rxrpc_send_data(struct rxrpc_sock *rx,
 
        timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
 
+       ret = rxrpc_wait_to_be_connected(call, &timeo);
+       if (ret < 0)
+               return ret;
+
+       if (call->conn->state == RXRPC_CONN_CLIENT_UNSECURED) {
+               ret = rxrpc_init_client_conn_security(call->conn);
+               if (ret < 0)
+                       return ret;
+       }
+
        /* this should be in poll */
        sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
 
@@ -252,15 +310,20 @@ reload:
        ret = -EPIPE;
        if (sk->sk_shutdown & SEND_SHUTDOWN)
                goto maybe_error;
-       state = READ_ONCE(call->state);
+       state = rxrpc_call_state(call);
        ret = -ESHUTDOWN;
        if (state >= RXRPC_CALL_COMPLETE)
                goto maybe_error;
        ret = -EPROTO;
        if (state != RXRPC_CALL_CLIENT_SEND_REQUEST &&
            state != RXRPC_CALL_SERVER_ACK_REQUEST &&
-           state != RXRPC_CALL_SERVER_SEND_REPLY)
+           state != RXRPC_CALL_SERVER_SEND_REPLY) {
+               /* Request phase complete for this client call */
+               trace_rxrpc_abort(call->debug_id, rxrpc_sendmsg_late_send,
+                                 call->cid, call->call_id, call->rx_consumed,
+                                 0, -EPROTO);
                goto maybe_error;
+       }
 
        ret = -EMSGSIZE;
        if (call->tx_total_len != -1) {
@@ -329,7 +392,7 @@ reload:
 
                /* check for the far side aborting the call or a network error
                 * occurring */
-               if (call->state == RXRPC_CALL_COMPLETE)
+               if (rxrpc_call_is_complete(call))
                        goto call_terminated;
 
                /* add the packet to the send queue if it's now full */
@@ -354,12 +417,9 @@ reload:
 
 success:
        ret = copied;
-       if (READ_ONCE(call->state) == RXRPC_CALL_COMPLETE) {
-               read_lock(&call->state_lock);
-               if (call->error < 0)
-                       ret = call->error;
-               read_unlock(&call->state_lock);
-       }
+       if (rxrpc_call_is_complete(call) &&
+           call->error < 0)
+               ret = call->error;
 out:
        call->tx_pending = txb;
        _leave(" = %d", ret);
@@ -543,7 +603,6 @@ rxrpc_new_client_call_for_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg,
                                     atomic_inc_return(&rxrpc_debug_id));
        /* The socket is now unlocked */
 
-       rxrpc_put_peer(cp.peer, rxrpc_peer_put_discard_tmp);
        _leave(" = %p\n", call);
        return call;
 }
@@ -556,7 +615,6 @@ rxrpc_new_client_call_for_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg,
 int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
        __releases(&rx->sk.sk_lock.slock)
 {
-       enum rxrpc_call_state state;
        struct rxrpc_call *call;
        unsigned long now, j;
        bool dropped_lock = false;
@@ -598,10 +656,10 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
                        return PTR_ERR(call);
                /* ... and we have the call lock. */
                ret = 0;
-               if (READ_ONCE(call->state) == RXRPC_CALL_COMPLETE)
+               if (rxrpc_call_is_complete(call))
                        goto out_put_unlock;
        } else {
-               switch (READ_ONCE(call->state)) {
+               switch (rxrpc_call_state(call)) {
                case RXRPC_CALL_UNINITIALISED:
                case RXRPC_CALL_CLIENT_AWAIT_CONN:
                case RXRPC_CALL_SERVER_PREALLOC:
@@ -655,17 +713,13 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
                break;
        }
 
-       state = READ_ONCE(call->state);
-       _debug("CALL %d USR %lx ST %d on CONN %p",
-              call->debug_id, call->user_call_ID, state, call->conn);
-
-       if (state >= RXRPC_CALL_COMPLETE) {
+       if (rxrpc_call_is_complete(call)) {
                /* it's too late for this call */
                ret = -ESHUTDOWN;
        } else if (p.command == RXRPC_CMD_SEND_ABORT) {
+               rxrpc_propose_abort(call, p.abort_code, -ECONNABORTED,
+                                   rxrpc_abort_call_sendmsg);
                ret = 0;
-               if (rxrpc_abort_call("CMD", call, 0, p.abort_code, -ECONNABORTED))
-                       ret = rxrpc_send_abort_packet(call);
        } else if (p.command != RXRPC_CMD_SEND_DATA) {
                ret = -EINVAL;
        } else {
@@ -705,34 +759,17 @@ int rxrpc_kernel_send_data(struct socket *sock, struct rxrpc_call *call,
        bool dropped_lock = false;
        int ret;
 
-       _enter("{%d,%s},", call->debug_id, rxrpc_call_states[call->state]);
+       _enter("{%d},", call->debug_id);
 
        ASSERTCMP(msg->msg_name, ==, NULL);
        ASSERTCMP(msg->msg_control, ==, NULL);
 
        mutex_lock(&call->user_mutex);
 
-       _debug("CALL %d USR %lx ST %d on CONN %p",
-              call->debug_id, call->user_call_ID, call->state, call->conn);
-
-       switch (READ_ONCE(call->state)) {
-       case RXRPC_CALL_CLIENT_SEND_REQUEST:
-       case RXRPC_CALL_SERVER_ACK_REQUEST:
-       case RXRPC_CALL_SERVER_SEND_REPLY:
-               ret = rxrpc_send_data(rxrpc_sk(sock->sk), call, msg, len,
-                                     notify_end_tx, &dropped_lock);
-               break;
-       case RXRPC_CALL_COMPLETE:
-               read_lock(&call->state_lock);
+       ret = rxrpc_send_data(rxrpc_sk(sock->sk), call, msg, len,
+                             notify_end_tx, &dropped_lock);
+       if (ret == -ESHUTDOWN)
                ret = call->error;
-               read_unlock(&call->state_lock);
-               break;
-       default:
-               /* Request phase complete for this client call */
-               trace_rxrpc_rx_eproto(call, 0, tracepoint_string("late_send"));
-               ret = -EPROTO;
-               break;
-       }
 
        if (!dropped_lock)
                mutex_unlock(&call->user_mutex);
@@ -747,24 +784,20 @@ EXPORT_SYMBOL(rxrpc_kernel_send_data);
  * @call: The call to be aborted
  * @abort_code: The abort code to stick into the ABORT packet
  * @error: Local error value
- * @why: 3-char string indicating why.
+ * @why: Indication as to why.
  *
  * Allow a kernel service to abort a call, if it's still in an abortable state
  * and return true if the call was aborted, false if it was already complete.
  */
 bool rxrpc_kernel_abort_call(struct socket *sock, struct rxrpc_call *call,
-                            u32 abort_code, int error, const char *why)
+                            u32 abort_code, int error, enum rxrpc_abort_reason why)
 {
        bool aborted;
 
-       _enter("{%d},%d,%d,%s", call->debug_id, abort_code, error, why);
+       _enter("{%d},%d,%d,%u", call->debug_id, abort_code, error, why);
 
        mutex_lock(&call->user_mutex);
-
-       aborted = rxrpc_abort_call(why, call, 0, abort_code, error);
-       if (aborted)
-               rxrpc_send_abort_packet(call);
-
+       aborted = rxrpc_propose_abort(call, abort_code, error, why);
        mutex_unlock(&call->user_mutex);
        return aborted;
 }
index ff47ce4d3968540d8830801632edc7a61c8ec871..6b26bdb999d77a9f7663ee13dba2900d4bcbbf92 100644 (file)
@@ -134,6 +134,11 @@ static int valid_label(const struct nlattr *attr,
 {
        const u32 *label = nla_data(attr);
 
+       if (nla_len(attr) != sizeof(*label)) {
+               NL_SET_ERR_MSG_MOD(extack, "Invalid MPLS label length");
+               return -EINVAL;
+       }
+
        if (*label & ~MPLS_LABEL_MASK || *label == MPLS_LABEL_IMPLNULL) {
                NL_SET_ERR_MSG_MOD(extack, "MPLS label out of range");
                return -EINVAL;
@@ -145,7 +150,8 @@ static int valid_label(const struct nlattr *attr,
 static const struct nla_policy mpls_policy[TCA_MPLS_MAX + 1] = {
        [TCA_MPLS_PARMS]        = NLA_POLICY_EXACT_LEN(sizeof(struct tc_mpls)),
        [TCA_MPLS_PROTO]        = { .type = NLA_U16 },
-       [TCA_MPLS_LABEL]        = NLA_POLICY_VALIDATE_FN(NLA_U32, valid_label),
+       [TCA_MPLS_LABEL]        = NLA_POLICY_VALIDATE_FN(NLA_BINARY,
+                                                        valid_label),
        [TCA_MPLS_TC]           = NLA_POLICY_RANGE(NLA_U8, 0, 7),
        [TCA_MPLS_TTL]          = NLA_POLICY_MIN(NLA_U8, 1),
        [TCA_MPLS_BOS]          = NLA_POLICY_RANGE(NLA_U8, 0, 1),
index eb0e9458e722e403a15fb6ebf4365a920870d7fa..ee2a050c887bfdec751028d2da007f798cf78589 100644 (file)
@@ -333,7 +333,7 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
                  struct tcindex_filter_result *r, struct nlattr **tb,
                  struct nlattr *est, u32 flags, struct netlink_ext_ack *extack)
 {
-       struct tcindex_filter_result new_filter_result, *old_r = r;
+       struct tcindex_filter_result new_filter_result;
        struct tcindex_data *cp = NULL, *oldp;
        struct tcindex_filter *f = NULL; /* make gcc behave */
        struct tcf_result cr = {};
@@ -402,7 +402,7 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
        err = tcindex_filter_result_init(&new_filter_result, cp, net);
        if (err < 0)
                goto errout_alloc;
-       if (old_r)
+       if (r)
                cr = r->res;
 
        err = -EBUSY;
@@ -479,14 +479,6 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
                tcf_bind_filter(tp, &cr, base);
        }
 
-       if (old_r && old_r != r) {
-               err = tcindex_filter_result_init(old_r, cp, net);
-               if (err < 0) {
-                       kfree(f);
-                       goto errout_alloc;
-               }
-       }
-
        oldp = p;
        r->res = cr;
        tcf_exts_change(&r->exts, &e);
index 2317db02c764d29f9b0fa561a7d3ef08816fe461..72d2c204d5f340eb3905ba98bc0cf6ed1415d80e 100644 (file)
@@ -1133,6 +1133,11 @@ skip:
                        return -ENOENT;
                }
 
+               if (new && new->ops == &noqueue_qdisc_ops) {
+                       NL_SET_ERR_MSG(extack, "Cannot assign noqueue to a class");
+                       return -EINVAL;
+               }
+
                err = cops->graft(parent, cl, new, &old, extack);
                if (err)
                        return err;
index f52255fea652beae735e514489db958a6ad1b7a1..4a981ca90b0bfeb3a3b6b2ad21a3b4cb5372db37 100644 (file)
@@ -393,10 +393,13 @@ static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                                result = tcf_classify(skb, NULL, fl, &res, true);
                                if (result < 0)
                                        continue;
+                               if (result == TC_ACT_SHOT)
+                                       goto done;
+
                                flow = (struct atm_flow_data *)res.class;
                                if (!flow)
                                        flow = lookup_flow(sch, res.classid);
-                               goto done;
+                               goto drop;
                        }
                }
                flow = NULL;
index 6568e17c4c634184493d8e5414b6671aecc2d276..36db5f6782f2cfd84888ec6b3ab18a675412f8b7 100644 (file)
@@ -230,6 +230,8 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
                result = tcf_classify(skb, NULL, fl, &res, true);
                if (!fl || result < 0)
                        goto fallback;
+               if (result == TC_ACT_SHOT)
+                       return NULL;
 
                cl = (void *)res.class;
                if (!cl) {
@@ -250,8 +252,6 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
                case TC_ACT_TRAP:
                        *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
                        fallthrough;
-               case TC_ACT_SHOT:
-                       return NULL;
                case TC_ACT_RECLASSIFY:
                        return cbq_reclassify(skb, cl);
                }
index a661b062cca85431845ef3abf84205035d3029ae..872d127c9db42a6eb4d67879f25c665ae6e7f0f1 100644 (file)
@@ -377,6 +377,7 @@ static int gred_offload_dump_stats(struct Qdisc *sch)
        /* Even if driver returns failure adjust the stats - in case offload
         * ended but driver still wants to adjust the values.
         */
+       sch_tree_lock(sch);
        for (i = 0; i < MAX_DPs; i++) {
                if (!table->tab[i])
                        continue;
@@ -393,6 +394,7 @@ static int gred_offload_dump_stats(struct Qdisc *sch)
                sch->qstats.overlimits += hw_stats->stats.qstats[i].overlimits;
        }
        _bstats_update(&sch->bstats, bytes, packets);
+       sch_tree_unlock(sch);
 
        kfree(hw_stats);
        return ret;
index e5b4bbf3ce3d5f36edb512d4017ebd97209bb377..f46643850df84477dfb21708369cbc605916ae07 100644 (file)
@@ -199,8 +199,14 @@ static unsigned long htb_search(struct Qdisc *sch, u32 handle)
 {
        return (unsigned long)htb_find(handle, sch);
 }
+
+#define HTB_DIRECT ((struct htb_class *)-1L)
+
 /**
  * htb_classify - classify a packet into class
+ * @skb: the socket buffer
+ * @sch: the active queue discipline
+ * @qerr: pointer for returned status code
  *
  * It returns NULL if the packet should be dropped or -1 if the packet
  * should be passed directly thru. In all other cases leaf class is returned.
@@ -211,8 +217,6 @@ static unsigned long htb_search(struct Qdisc *sch, u32 handle)
  * have no valid leaf we try to use MAJOR:default leaf. It still unsuccessful
  * then finish and return direct queue.
  */
-#define HTB_DIRECT ((struct htb_class *)-1L)
-
 static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
                                      int *qerr)
 {
@@ -1545,7 +1549,7 @@ static int htb_destroy_class_offload(struct Qdisc *sch, struct htb_class *cl,
        struct tc_htb_qopt_offload offload_opt;
        struct netdev_queue *dev_queue;
        struct Qdisc *q = cl->leaf.q;
-       struct Qdisc *old = NULL;
+       struct Qdisc *old;
        int err;
 
        if (cl->level)
@@ -1553,14 +1557,17 @@ static int htb_destroy_class_offload(struct Qdisc *sch, struct htb_class *cl,
 
        WARN_ON(!q);
        dev_queue = htb_offload_get_queue(cl);
-       old = htb_graft_helper(dev_queue, NULL);
-       if (destroying)
-               /* Before HTB is destroyed, the kernel grafts noop_qdisc to
-                * all queues.
+       /* When destroying, caller qdisc_graft grafts the new qdisc and invokes
+        * qdisc_put for the qdisc being destroyed. htb_destroy_class_offload
+        * does not need to graft or qdisc_put the qdisc being destroyed.
+        */
+       if (!destroying) {
+               old = htb_graft_helper(dev_queue, NULL);
+               /* Last qdisc grafted should be the same as cl->leaf.q when
+                * calling htb_delete.
                 */
-               WARN_ON(!(old->flags & TCQ_F_BUILTIN));
-       else
                WARN_ON(old != q);
+       }
 
        if (cl->parent) {
                _bstats_update(&cl->parent->bstats_bias,
@@ -1577,10 +1584,12 @@ static int htb_destroy_class_offload(struct Qdisc *sch, struct htb_class *cl,
        };
        err = htb_offload(qdisc_dev(sch), &offload_opt);
 
-       if (!err || destroying)
-               qdisc_put(old);
-       else
-               htb_graft_helper(dev_queue, old);
+       if (!destroying) {
+               if (!err)
+                       qdisc_put(old);
+               else
+                       htb_graft_helper(dev_queue, old);
+       }
 
        if (last_child)
                return err;
index 570389f6cdd7dbab5749dc06d886555305cbf623..9a11a499ea2df8d18c9c062496fdcbcf5a861391 100644 (file)
@@ -1700,6 +1700,8 @@ static void taprio_reset(struct Qdisc *sch)
        int i;
 
        hrtimer_cancel(&q->advance_timer);
+       qdisc_synchronize(sch);
+
        if (q->qdiscs) {
                for (i = 0; i < dev->num_tx_queues; i++)
                        if (q->qdiscs[i])
@@ -1720,6 +1722,7 @@ static void taprio_destroy(struct Qdisc *sch)
         * happens in qdisc_create(), after taprio_init() has been called.
         */
        hrtimer_cancel(&q->advance_timer);
+       qdisc_synchronize(sch);
 
        taprio_disable_offload(dev, q, NULL);
 
index 7bb247c51e2f66e7b8968c4cf054ed238aacf2b8..2d7b1e03110ae899aa156499eebac0711e61ae84 100644 (file)
@@ -302,7 +302,7 @@ __gss_find_upcall(struct rpc_pipe *pipe, kuid_t uid, const struct gss_auth *auth
        list_for_each_entry(pos, &pipe->in_downcall, list) {
                if (!uid_eq(pos->uid, uid))
                        continue;
-               if (auth && pos->auth->service != auth->service)
+               if (pos->auth->service != auth->service)
                        continue;
                refcount_inc(&pos->count);
                return pos;
@@ -686,6 +686,21 @@ out:
        return err;
 }
 
+static struct gss_upcall_msg *
+gss_find_downcall(struct rpc_pipe *pipe, kuid_t uid)
+{
+       struct gss_upcall_msg *pos;
+       list_for_each_entry(pos, &pipe->in_downcall, list) {
+               if (!uid_eq(pos->uid, uid))
+                       continue;
+               if (!rpc_msg_is_inflight(&pos->msg))
+                       continue;
+               refcount_inc(&pos->count);
+               return pos;
+       }
+       return NULL;
+}
+
 #define MSG_BUF_MAXSIZE 1024
 
 static ssize_t
@@ -732,7 +747,7 @@ gss_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
        err = -ENOENT;
        /* Find a matching upcall */
        spin_lock(&pipe->lock);
-       gss_msg = __gss_find_upcall(pipe, uid, NULL);
+       gss_msg = gss_find_downcall(pipe, uid);
        if (gss_msg == NULL) {
                spin_unlock(&pipe->lock);
                goto err_put_ctx;
index 148bb0a7fa5b4aae7f67bace1f43c9501d2841cb..acb822b23af1b01cd0b30d8346813bb671920901 100644 (file)
@@ -923,7 +923,7 @@ unwrap_integ_data(struct svc_rqst *rqstp, struct xdr_buf *buf, u32 seq, struct g
         * rejecting the server-computed MIC in this somewhat rare case,
         * do not use splice with the GSS integrity service.
         */
-       __clear_bit(RQ_SPLICE_OK, &rqstp->rq_flags);
+       clear_bit(RQ_SPLICE_OK, &rqstp->rq_flags);
 
        /* Did we already verify the signature on the original pass through? */
        if (rqstp->rq_deferred)
@@ -990,7 +990,7 @@ unwrap_priv_data(struct svc_rqst *rqstp, struct xdr_buf *buf, u32 seq, struct gs
        int pad, remaining_len, offset;
        u32 rseqno;
 
-       __clear_bit(RQ_SPLICE_OK, &rqstp->rq_flags);
+       clear_bit(RQ_SPLICE_OK, &rqstp->rq_flags);
 
        priv_len = svc_getnl(&buf->head[0]);
        if (rqstp->rq_deferred) {
index 85f0c3cfc87745f26aa3f8d869c71f136be22997..f06622814a958a5248d8407dd60024360c9019c9 100644 (file)
@@ -1243,10 +1243,10 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
                goto err_short_len;
 
        /* Will be turned off by GSS integrity and privacy services */
-       __set_bit(RQ_SPLICE_OK, &rqstp->rq_flags);
+       set_bit(RQ_SPLICE_OK, &rqstp->rq_flags);
        /* Will be turned off only when NFSv4 Sessions are used */
-       __set_bit(RQ_USEDEFERRAL, &rqstp->rq_flags);
-       __clear_bit(RQ_DROPME, &rqstp->rq_flags);
+       set_bit(RQ_USEDEFERRAL, &rqstp->rq_flags);
+       clear_bit(RQ_DROPME, &rqstp->rq_flags);
 
        svc_putu32(resv, rqstp->rq_xid);
 
index 2106003645a78d7a441efc839f3e31c58af186d1..c2ce12538008070e6d56bb0cdc24d727e9a9c487 100644 (file)
@@ -1238,7 +1238,7 @@ static struct cache_deferred_req *svc_defer(struct cache_req *req)
        trace_svc_defer(rqstp);
        svc_xprt_get(rqstp->rq_xprt);
        dr->xprt = rqstp->rq_xprt;
-       __set_bit(RQ_DROPME, &rqstp->rq_flags);
+       set_bit(RQ_DROPME, &rqstp->rq_flags);
 
        dr->handle.revisit = svc_revisit;
        return &dr->handle;
index 015714398007aef28bcf5957a0994dc7cc5f4f5c..815baf308236a9d20ed7103603cfbc2af1fa1bd9 100644 (file)
@@ -298,9 +298,9 @@ static void svc_sock_setbufsize(struct svc_sock *svsk, unsigned int nreqs)
 static void svc_sock_secure_port(struct svc_rqst *rqstp)
 {
        if (svc_port_is_privileged(svc_addr(rqstp)))
-               __set_bit(RQ_SECURE, &rqstp->rq_flags);
+               set_bit(RQ_SECURE, &rqstp->rq_flags);
        else
-               __clear_bit(RQ_SECURE, &rqstp->rq_flags);
+               clear_bit(RQ_SECURE, &rqstp->rq_flags);
 }
 
 /*
@@ -1008,9 +1008,9 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp)
        rqstp->rq_xprt_ctxt   = NULL;
        rqstp->rq_prot        = IPPROTO_TCP;
        if (test_bit(XPT_LOCAL, &svsk->sk_xprt.xpt_flags))
-               __set_bit(RQ_LOCAL, &rqstp->rq_flags);
+               set_bit(RQ_LOCAL, &rqstp->rq_flags);
        else
-               __clear_bit(RQ_LOCAL, &rqstp->rq_flags);
+               clear_bit(RQ_LOCAL, &rqstp->rq_flags);
 
        p = (__be32 *)rqstp->rq_arg.head[0].iov_base;
        calldir = p[1];
index 199fa012f18a879a6d3389dfc3b125cc12d14678..94b20fb471356356885da1b482750f387c82c9ab 100644 (file)
@@ -602,7 +602,7 @@ static int svc_rdma_has_wspace(struct svc_xprt *xprt)
 
 static void svc_rdma_secure_port(struct svc_rqst *rqstp)
 {
-       __set_bit(RQ_SECURE, &rqstp->rq_flags);
+       set_bit(RQ_SECURE, &rqstp->rq_flags);
 }
 
 static void svc_rdma_kill_temp_xprt(struct svc_xprt *xprt)
index 49ddc484c4fe72db01b0d560b221360c42f349f6..5e000fde806768f8d156c31bacde3b2b7c65fb83 100644 (file)
@@ -1179,8 +1179,9 @@ void tipc_node_check_dest(struct net *net, u32 addr,
        bool addr_match = false;
        bool sign_match = false;
        bool link_up = false;
+       bool link_is_reset = false;
        bool accept_addr = false;
-       bool reset = true;
+       bool reset = false;
        char *if_name;
        unsigned long intv;
        u16 session;
@@ -1200,14 +1201,14 @@ void tipc_node_check_dest(struct net *net, u32 addr,
        /* Prepare to validate requesting node's signature and media address */
        l = le->link;
        link_up = l && tipc_link_is_up(l);
+       link_is_reset = l && tipc_link_is_reset(l);
        addr_match = l && !memcmp(&le->maddr, maddr, sizeof(*maddr));
        sign_match = (signature == n->signature);
 
        /* These three flags give us eight permutations: */
 
        if (sign_match && addr_match && link_up) {
-               /* All is fine. Do nothing. */
-               reset = false;
+               /* All is fine. Ignore requests. */
                /* Peer node is not a container/local namespace */
                if (!n->peer_hash_mix)
                        n->peer_hash_mix = hash_mixes;
@@ -1232,6 +1233,7 @@ void tipc_node_check_dest(struct net *net, u32 addr,
                 */
                accept_addr = true;
                *respond = true;
+               reset = true;
        } else if (!sign_match && addr_match && link_up) {
                /* Peer node rebooted. Two possibilities:
                 *  - Delayed re-discovery; this link endpoint has already
@@ -1263,6 +1265,7 @@ void tipc_node_check_dest(struct net *net, u32 addr,
                n->signature = signature;
                accept_addr = true;
                *respond = true;
+               reset = true;
        }
 
        if (!accept_addr)
@@ -1291,6 +1294,7 @@ void tipc_node_check_dest(struct net *net, u32 addr,
                tipc_link_fsm_evt(l, LINK_RESET_EVT);
                if (n->state == NODE_FAILINGOVER)
                        tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
+               link_is_reset = tipc_link_is_reset(l);
                le->link = l;
                n->link_cnt++;
                tipc_node_calculate_timer(n, l);
@@ -1303,7 +1307,7 @@ void tipc_node_check_dest(struct net *net, u32 addr,
        memcpy(&le->maddr, maddr, sizeof(*maddr));
 exit:
        tipc_node_write_unlock(n);
-       if (reset && l && !tipc_link_is_reset(l))
+       if (reset && !link_is_reset)
                tipc_node_link_down(n, b->identity, false);
        tipc_node_put(n);
 }
index 5eb5e8280379ad9140f0b57bf4c51cae35367279..43343e13c5425579a0ba6cd3da01970e3580bc30 100644 (file)
@@ -44,6 +44,7 @@ modpost-args =                                                                                \
        $(if $(CONFIG_MODVERSIONS),-m)                                                  \
        $(if $(CONFIG_MODULE_SRCVERSION_ALL),-a)                                        \
        $(if $(CONFIG_SECTION_MISMATCH_WARN_ONLY),,-E)                                  \
+       $(if $(KBUILD_MODPOST_WARN),-w)                                                 \
        $(if $(KBUILD_NSDEPS),-d $(MODULES_NSDEPS))                                     \
        $(if $(CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS)$(KBUILD_NSDEPS),-N)       \
        -o $@
@@ -55,6 +56,13 @@ ifneq ($(findstring i,$(filter-out --%,$(MAKEFLAGS))),)
 modpost-args += -n
 endif
 
+# Read out modules.order to pass in modpost.
+# Otherwise, allmodconfig would fail with "Argument list too long".
+ifdef KBUILD_MODULES
+modpost-args += -T $(MODORDER)
+modpost-deps += $(MODORDER)
+endif
+
 ifeq ($(KBUILD_EXTMOD),)
 
 # Generate the list of in-tree objects in vmlinux
@@ -113,17 +121,10 @@ modpost-args += -e $(addprefix -i , $(KBUILD_EXTRA_SYMBOLS))
 
 endif # ($(KBUILD_EXTMOD),)
 
-ifneq ($(KBUILD_MODPOST_WARN)$(missing-input),)
+ifneq ($(missing-input),)
 modpost-args += -w
 endif
 
-ifdef KBUILD_MODULES
-modpost-args += -T $(MODORDER)
-modpost-deps += $(MODORDER)
-endif
-
-# Read out modules.order to pass in modpost.
-# Otherwise, allmodconfig would fail with "Argument list too long".
 quiet_cmd_modpost = MODPOST $@
       cmd_modpost = \
        $(if $(missing-input), \
index 539e9f765d64db8c924187afb208d8a016cba024..525a2820976fd06c9fd278c9495978bcfce22160 100644 (file)
@@ -158,6 +158,7 @@ $(perf-tar-pkgs):
 PHONY += help
 help:
        @echo '  rpm-pkg             - Build both source and binary RPM kernel packages'
+       @echo '  srcrpm-pkg          - Build only the source kernel RPM package'
        @echo '  binrpm-pkg          - Build only the binary kernel RPM package'
        @echo '  deb-pkg             - Build both source and binary deb kernel packages'
        @echo '  bindeb-pkg          - Build only the binary kernel deb package'
index 49946cb968440c6fecd63b8076e1a46a87d80aee..10176dec97eac23fea5385d1d97b80a3617eaea3 100644 (file)
@@ -18,6 +18,7 @@ quiet_cmd_cc_o_c = CC      $@
        $(call if_changed_dep,cc_o_c)
 
 ifdef CONFIG_MODULES
+KASAN_SANITIZE_.vmlinux.export.o := n
 targets += .vmlinux.export.o
 vmlinux: .vmlinux.export.o
 endif
index 2328f9a641dadf53490cccfe26830517e3426bf0..f932aeaba71a2e7696c9083896ac441d06ca08fa 100644 (file)
@@ -94,7 +94,6 @@
 #include <unistd.h>
 #include <fcntl.h>
 #include <string.h>
-#include <stdarg.h>
 #include <stdlib.h>
 #include <stdio.h>
 #include <ctype.h>
index 4192855f5b8b5c1e3b64480c11c8b1e001a89dcd..7eca035472d3030822f46c58cd8989b59601306b 100755 (executable)
@@ -26,11 +26,20 @@ try:
        # If the MAKEFLAGS variable contains multiple instances of the
        # --jobserver-auth= option, the last one is relevant.
        fds = opts[-1].split("=", 1)[1]
-       reader, writer = [int(x) for x in fds.split(",", 1)]
-       # Open a private copy of reader to avoid setting nonblocking
-       # on an unexpecting process with the same reader fd.
-       reader = os.open("/proc/self/fd/%d" % (reader),
-                        os.O_RDONLY | os.O_NONBLOCK)
+
+       # Starting with GNU Make 4.4, named pipes are used for reader and writer.
+       # Example argument: --jobserver-auth=fifo:/tmp/GMfifo8134
+       _, _, path = fds.partition('fifo:')
+
+       if path:
+               reader = os.open(path, os.O_RDONLY | os.O_NONBLOCK)
+               writer = os.open(path, os.O_WRONLY)
+       else:
+               reader, writer = [int(x) for x in fds.split(",", 1)]
+               # Open a private copy of reader to avoid setting nonblocking
+               # on an unexpecting process with the same reader fd.
+               reader = os.open("/proc/self/fd/%d" % (reader),
+                                os.O_RDONLY | os.O_NONBLOCK)
 
        # Read out as many jobserver slots as possible.
        while True:
index c8a3f9cd52f0281c3b070463596b8960f4d22412..0b2ff775b2e33a82749d6efa4820825e3118ba22 100644 (file)
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0-only
 /conf
 /[gmnq]conf
+/[gmnq]conf-bin
 /[gmnq]conf-cflags
 /[gmnq]conf-libs
-/qconf-bin
 /qconf-moc.cc
index 0b1d15efaeb0c2c71fbe3d31d06c1bba73240005..af1c96198f4919c1af966e5fc06dcddb7c405fef 100644 (file)
@@ -209,7 +209,7 @@ $(obj)/gconf: | $(obj)/gconf-libs
 $(obj)/gconf.o: | $(obj)/gconf-cflags
 
 # check if necessary packages are available, and configure build flags
-cmd_conf_cfg = $< $(addprefix $(obj)/$*conf-, cflags libs bin)
+cmd_conf_cfg = $< $(addprefix $(obj)/$*conf-, cflags libs bin); touch $(obj)/$*conf-bin
 
 $(obj)/%conf-cflags $(obj)/%conf-libs $(obj)/%conf-bin: $(src)/%conf-cfg.sh
        $(call cmd,conf_cfg)
index 9c549683c62751959201c36954cbbb6eaddd706f..e67e0db50b2e2510b88f1507f0462ec1812813f4 100644 (file)
@@ -161,6 +161,12 @@ static const char mconf_readme[] =
 "(especially with a larger number of unrolled categories) than the\n"
 "default mode.\n"
 "\n"
+
+"Search\n"
+"-------\n"
+"Pressing the forward-slash (/) anywhere brings up a search dialog box.\n"
+"\n"
+
 "Different color themes available\n"
 "--------------------------------\n"
 "It is possible to select different color themes using the variable\n"
index dda00a948a01df3da7a32288a6767727a41b1c57..094e52c979a8cdbea1af95da6fcba93104e82ff4 100755 (executable)
@@ -1,7 +1,7 @@
 #!/bin/sh
 #
 #      Output a simple RPM spec file.
-#      This version assumes a minimum of RPM 4.0.3.
+#      This version assumes a minimum of RPM 4.13
 #
 #      The only gothic bit here is redefining install_post to avoid
 #      stripping the symbols from files in the kernel which we want
@@ -51,7 +51,8 @@ sed -e '/^DEL/d' -e 's/^\t*//' <<EOF
        URL: https://www.kernel.org
 $S     Source: kernel-$__KERNELRELEASE.tar.gz
        Provides: $PROVIDES
-$S     BuildRequires: bc binutils bison dwarves elfutils-libelf-devel flex
+$S     BuildRequires: bc binutils bison dwarves
+$S     BuildRequires: (elfutils-libelf-devel or libelf-devel) flex
 $S     BuildRequires: gcc make openssl openssl-devel perl python3 rsync
 
        # $UTS_MACHINE as a fallback of _arch in case
index b9f867100a9ff54f6c31a7c6e8a37d0a72457cab..fad75be5f381d6e0d16466fa8b0b47fcfd092a23 100644 (file)
@@ -6,13 +6,11 @@ config SECURITY_TOMOYO
        select SECURITYFS
        select SECURITY_PATH
        select SECURITY_NETWORK
-       select SRCU
-       select BUILD_BIN2C
        default n
        help
          This selects TOMOYO Linux, pathname-based access control.
          Required userspace tools and further information may be
-         found at <http://tomoyo.sourceforge.jp/>.
+         found at <https://tomoyo.osdn.jp/>.
          If you are unsure how to answer this question, answer N.
 
 config SECURITY_TOMOYO_MAX_ACCEPT_ENTRY
index cca5a3012fee2e564ffa0ae293d1e8e223d80689..884ff155edc3929eff38bd084ba8893af86b6b17 100644 (file)
@@ -2,15 +2,18 @@
 obj-y = audit.o common.o condition.o domain.o environ.o file.o gc.o group.o load_policy.o memory.o mount.o network.o realpath.o securityfs_if.o tomoyo.o util.o
 
 targets += builtin-policy.h
-define do_policy
-echo "static char tomoyo_builtin_$(1)[] __initdata ="; \
-$(objtree)/scripts/bin2c <$(firstword $(wildcard $(obj)/policy/$(1).conf $(srctree)/$(src)/policy/$(1).conf.default) /dev/null); \
-echo ";"
-endef
-quiet_cmd_policy  = POLICY  $@
-      cmd_policy  = ($(call do_policy,profile); $(call do_policy,exception_policy); $(call do_policy,domain_policy); $(call do_policy,manager); $(call do_policy,stat)) >$@
 
-$(obj)/builtin-policy.h: $(wildcard $(obj)/policy/*.conf $(src)/policy/*.conf.default) FORCE
+quiet_cmd_policy = POLICY  $@
+      cmd_policy = { \
+       $(foreach x, profile exception_policy domain_policy manager stat, \
+       printf 'static char tomoyo_builtin_$x[] __initdata =\n'; \
+       sed -e 's/\\/\\\\/g' -e 's/\"/\\"/g' -e 's/\(.*\)/\t"\1\\n"/' -- $(firstword $(filter %/$x.conf %/$x.conf.default, $^) /dev/null);  \
+       printf '\t"";\n';) \
+       } > $@
+
+$(obj)/builtin-policy.h: $(wildcard $(obj)/policy/*.conf $(srctree)/$(src)/policy/*.conf.default) FORCE
        $(call if_changed,policy)
 
+ifndef CONFIG_SECURITY_TOMOYO_INSECURE_BUILTIN_SETTING
 $(obj)/common.o: $(obj)/builtin-policy.h
+endif
index 50e7ba66f1876efd03e31e7cf1ab4d5ddcb4fb88..82aa1af1d1d873566fef0b188750e03995626aad 100644 (file)
@@ -1203,14 +1203,19 @@ static int snd_ctl_elem_read(struct snd_card *card,
        const u32 pattern = 0xdeadbeef;
        int ret;
 
+       down_read(&card->controls_rwsem);
        kctl = snd_ctl_find_id(card, &control->id);
-       if (kctl == NULL)
-               return -ENOENT;
+       if (kctl == NULL) {
+               ret = -ENOENT;
+               goto unlock;
+       }
 
        index_offset = snd_ctl_get_ioff(kctl, &control->id);
        vd = &kctl->vd[index_offset];
-       if (!(vd->access & SNDRV_CTL_ELEM_ACCESS_READ) || kctl->get == NULL)
-               return -EPERM;
+       if (!(vd->access & SNDRV_CTL_ELEM_ACCESS_READ) || kctl->get == NULL) {
+               ret = -EPERM;
+               goto unlock;
+       }
 
        snd_ctl_build_ioff(&control->id, kctl, index_offset);
 
@@ -1220,7 +1225,7 @@ static int snd_ctl_elem_read(struct snd_card *card,
        info.id = control->id;
        ret = __snd_ctl_elem_info(card, kctl, &info, NULL);
        if (ret < 0)
-               return ret;
+               goto unlock;
 #endif
 
        if (!snd_ctl_skip_validation(&info))
@@ -1230,7 +1235,7 @@ static int snd_ctl_elem_read(struct snd_card *card,
                ret = kctl->get(kctl, control);
        snd_power_unref(card);
        if (ret < 0)
-               return ret;
+               goto unlock;
        if (!snd_ctl_skip_validation(&info) &&
            sanity_check_elem_value(card, control, &info, pattern) < 0) {
                dev_err(card->dev,
@@ -1238,8 +1243,11 @@ static int snd_ctl_elem_read(struct snd_card *card,
                        control->id.iface, control->id.device,
                        control->id.subdevice, control->id.name,
                        control->id.index);
-               return -EINVAL;
+               ret = -EINVAL;
+               goto unlock;
        }
+unlock:
+       up_read(&card->controls_rwsem);
        return ret;
 }
 
@@ -1253,9 +1261,7 @@ static int snd_ctl_elem_read_user(struct snd_card *card,
        if (IS_ERR(control))
                return PTR_ERR(control);
 
-       down_read(&card->controls_rwsem);
        result = snd_ctl_elem_read(card, control);
-       up_read(&card->controls_rwsem);
        if (result < 0)
                goto error;
 
index f975cc85772bbce857fd2565fe720070a9f83bd9..3cadd40100f3e538a3834d177c024ede838d3117 100644 (file)
@@ -530,12 +530,11 @@ static ssize_t set_led_id(struct snd_ctl_led_card *led_card, const char *buf, si
                          bool attach)
 {
        char buf2[256], *s, *os;
-       size_t len = max(sizeof(s) - 1, count);
        struct snd_ctl_elem_id id;
        int err;
 
-       strncpy(buf2, buf, len);
-       buf2[len] = '\0';
+       if (strscpy(buf2, buf, sizeof(buf2)) < 0)
+               return -E2BIG;
        memset(&id, 0, sizeof(id));
        id.iface = SNDRV_CTL_ELEM_IFACE_MIXER;
        s = buf2;
index 91842c0c8c74b3224c9ecf47cf331263dd781611..f7815ee24f836cb0a3729c95a044bc111aedd1b5 100644 (file)
@@ -598,8 +598,8 @@ static int cs35l41_system_suspend(struct device *dev)
        dev_dbg(cs35l41->dev, "System Suspend\n");
 
        if (cs35l41->hw_cfg.bst_type == CS35L41_EXT_BOOST_NO_VSPK_SWITCH) {
-               dev_err(cs35l41->dev, "System Suspend not supported\n");
-               return -EINVAL;
+               dev_err_once(cs35l41->dev, "System Suspend not supported\n");
+               return 0; /* don't block the whole system suspend */
        }
 
        ret = pm_runtime_force_suspend(dev);
@@ -624,8 +624,8 @@ static int cs35l41_system_resume(struct device *dev)
        dev_dbg(cs35l41->dev, "System Resume\n");
 
        if (cs35l41->hw_cfg.bst_type == CS35L41_EXT_BOOST_NO_VSPK_SWITCH) {
-               dev_err(cs35l41->dev, "System Resume not supported\n");
-               return -EINVAL;
+               dev_err_once(cs35l41->dev, "System Resume not supported\n");
+               return 0; /* don't block the whole system resume */
        }
 
        if (cs35l41->reset_gpio) {
@@ -647,6 +647,15 @@ static int cs35l41_system_resume(struct device *dev)
        return ret;
 }
 
+static int cs35l41_runtime_idle(struct device *dev)
+{
+       struct cs35l41_hda *cs35l41 = dev_get_drvdata(dev);
+
+       if (cs35l41->hw_cfg.bst_type == CS35L41_EXT_BOOST_NO_VSPK_SWITCH)
+               return -EBUSY; /* suspend not supported yet on this model */
+       return 0;
+}
+
 static int cs35l41_runtime_suspend(struct device *dev)
 {
        struct cs35l41_hda *cs35l41 = dev_get_drvdata(dev);
@@ -1536,7 +1545,8 @@ void cs35l41_hda_remove(struct device *dev)
 EXPORT_SYMBOL_NS_GPL(cs35l41_hda_remove, SND_HDA_SCODEC_CS35L41);
 
 const struct dev_pm_ops cs35l41_hda_pm_ops = {
-       RUNTIME_PM_OPS(cs35l41_runtime_suspend, cs35l41_runtime_resume, NULL)
+       RUNTIME_PM_OPS(cs35l41_runtime_suspend, cs35l41_runtime_resume,
+                      cs35l41_runtime_idle)
        SYSTEM_SLEEP_PM_OPS(cs35l41_system_suspend, cs35l41_system_resume)
 };
 EXPORT_SYMBOL_NS_GPL(cs35l41_hda_pm_ops, SND_HDA_SCODEC_CS35L41);
index 8015e4471267857037b2ba22c86ecef6c290a71f..9ea633fe93393531e5df51912c39a9b04528b458 100644 (file)
@@ -167,6 +167,7 @@ struct hdmi_spec {
        struct hdmi_ops ops;
 
        bool dyn_pin_out;
+       bool static_pcm_mapping;
        /* hdmi interrupt trigger control flag for Nvidia codec */
        bool hdmi_intr_trig_ctrl;
        bool nv_dp_workaround; /* workaround DP audio infoframe for Nvidia */
@@ -1525,13 +1526,16 @@ static void update_eld(struct hda_codec *codec,
         */
        pcm_jack = pin_idx_to_pcm_jack(codec, per_pin);
 
-       if (eld->eld_valid) {
-               hdmi_attach_hda_pcm(spec, per_pin);
-               hdmi_pcm_setup_pin(spec, per_pin);
-       } else {
-               hdmi_pcm_reset_pin(spec, per_pin);
-               hdmi_detach_hda_pcm(spec, per_pin);
+       if (!spec->static_pcm_mapping) {
+               if (eld->eld_valid) {
+                       hdmi_attach_hda_pcm(spec, per_pin);
+                       hdmi_pcm_setup_pin(spec, per_pin);
+               } else {
+                       hdmi_pcm_reset_pin(spec, per_pin);
+                       hdmi_detach_hda_pcm(spec, per_pin);
+               }
        }
+
        /* if pcm_idx == -1, it means this is in monitor connection event
         * we can get the correct pcm_idx now.
         */
@@ -1977,6 +1981,7 @@ static const struct snd_pci_quirk force_connect_list[] = {
        SND_PCI_QUIRK(0x103c, 0x870f, "HP", 1),
        SND_PCI_QUIRK(0x103c, 0x871a, "HP", 1),
        SND_PCI_QUIRK(0x103c, 0x8711, "HP", 1),
+       SND_PCI_QUIRK(0x103c, 0x8715, "HP", 1),
        SND_PCI_QUIRK(0x1462, 0xec94, "MS-7C94", 1),
        SND_PCI_QUIRK(0x8086, 0x2081, "Intel NUC 10", 1),
        {}
@@ -2281,8 +2286,8 @@ static int generic_hdmi_build_pcms(struct hda_codec *codec)
        struct hdmi_spec *spec = codec->spec;
        int idx, pcm_num;
 
-       /* limit the PCM devices to the codec converters */
-       pcm_num = spec->num_cvts;
+       /* limit the PCM devices to the codec converters or available PINs */
+       pcm_num = min(spec->num_cvts, spec->num_pins);
        codec_dbg(codec, "hdmi: pcm_num set to %d\n", pcm_num);
 
        for (idx = 0; idx < pcm_num; idx++) {
@@ -2379,6 +2384,11 @@ static int generic_hdmi_build_controls(struct hda_codec *codec)
                struct hdmi_spec_per_pin *per_pin = get_pin(spec, pin_idx);
                struct hdmi_eld *pin_eld = &per_pin->sink_eld;
 
+               if (spec->static_pcm_mapping) {
+                       hdmi_attach_hda_pcm(spec, per_pin);
+                       hdmi_pcm_setup_pin(spec, per_pin);
+               }
+
                pin_eld->eld_valid = false;
                hdmi_present_sense(per_pin, 0);
        }
@@ -4419,6 +4429,8 @@ static int patch_atihdmi(struct hda_codec *codec)
 
        spec = codec->spec;
 
+       spec->static_pcm_mapping = true;
+
        spec->ops.pin_get_eld = atihdmi_pin_get_eld;
        spec->ops.pin_setup_infoframe = atihdmi_pin_setup_infoframe;
        spec->ops.pin_hbr_setup = atihdmi_pin_hbr_setup;
index e443d88f627f00040575398c853013d426206342..6fab7c8fc19ae19727d9b3b905579c3a5e3cfee1 100644 (file)
@@ -3564,6 +3564,15 @@ static void alc256_init(struct hda_codec *codec)
        hda_nid_t hp_pin = alc_get_hp_pin(spec);
        bool hp_pin_sense;
 
+       if (spec->ultra_low_power) {
+               alc_update_coef_idx(codec, 0x03, 1<<1, 1<<1);
+               alc_update_coef_idx(codec, 0x08, 3<<2, 3<<2);
+               alc_update_coef_idx(codec, 0x08, 7<<4, 0);
+               alc_update_coef_idx(codec, 0x3b, 1<<15, 0);
+               alc_update_coef_idx(codec, 0x0e, 7<<6, 7<<6);
+               msleep(30);
+       }
+
        if (!hp_pin)
                hp_pin = 0x21;
 
@@ -3575,14 +3584,6 @@ static void alc256_init(struct hda_codec *codec)
                msleep(2);
 
        alc_update_coefex_idx(codec, 0x57, 0x04, 0x0007, 0x1); /* Low power */
-       if (spec->ultra_low_power) {
-               alc_update_coef_idx(codec, 0x03, 1<<1, 1<<1);
-               alc_update_coef_idx(codec, 0x08, 3<<2, 3<<2);
-               alc_update_coef_idx(codec, 0x08, 7<<4, 0);
-               alc_update_coef_idx(codec, 0x3b, 1<<15, 0);
-               alc_update_coef_idx(codec, 0x0e, 7<<6, 7<<6);
-               msleep(30);
-       }
 
        snd_hda_codec_write(codec, hp_pin, 0,
                            AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
@@ -3713,6 +3714,13 @@ static void alc225_init(struct hda_codec *codec)
        hda_nid_t hp_pin = alc_get_hp_pin(spec);
        bool hp1_pin_sense, hp2_pin_sense;
 
+       if (spec->ultra_low_power) {
+               alc_update_coef_idx(codec, 0x08, 0x0f << 2, 3<<2);
+               alc_update_coef_idx(codec, 0x0e, 7<<6, 7<<6);
+               alc_update_coef_idx(codec, 0x33, 1<<11, 0);
+               msleep(30);
+       }
+
        if (spec->codec_variant != ALC269_TYPE_ALC287 &&
                spec->codec_variant != ALC269_TYPE_ALC245)
                /* required only at boot or S3 and S4 resume time */
@@ -3734,12 +3742,6 @@ static void alc225_init(struct hda_codec *codec)
                msleep(2);
 
        alc_update_coefex_idx(codec, 0x57, 0x04, 0x0007, 0x1); /* Low power */
-       if (spec->ultra_low_power) {
-               alc_update_coef_idx(codec, 0x08, 0x0f << 2, 3<<2);
-               alc_update_coef_idx(codec, 0x0e, 7<<6, 7<<6);
-               alc_update_coef_idx(codec, 0x33, 1<<11, 0);
-               msleep(30);
-       }
 
        if (hp1_pin_sense || spec->ultra_low_power)
                snd_hda_codec_write(codec, hp_pin, 0,
@@ -4644,6 +4646,16 @@ static void alc285_fixup_hp_coef_micmute_led(struct hda_codec *codec,
        }
 }
 
+static void alc285_fixup_hp_gpio_micmute_led(struct hda_codec *codec,
+                               const struct hda_fixup *fix, int action)
+{
+       struct alc_spec *spec = codec->spec;
+
+       if (action == HDA_FIXUP_ACT_PRE_PROBE)
+               spec->micmute_led_polarity = 1;
+       alc_fixup_hp_gpio_led(codec, action, 0, 0x04);
+}
+
 static void alc236_fixup_hp_coef_micmute_led(struct hda_codec *codec,
                                const struct hda_fixup *fix, int action)
 {
@@ -4665,6 +4677,13 @@ static void alc285_fixup_hp_mute_led(struct hda_codec *codec,
        alc285_fixup_hp_coef_micmute_led(codec, fix, action);
 }
 
+static void alc285_fixup_hp_spectre_x360_mute_led(struct hda_codec *codec,
+                               const struct hda_fixup *fix, int action)
+{
+       alc285_fixup_hp_mute_led_coefbit(codec, fix, action);
+       alc285_fixup_hp_gpio_micmute_led(codec, fix, action);
+}
+
 static void alc236_fixup_hp_mute_led(struct hda_codec *codec,
                                const struct hda_fixup *fix, int action)
 {
@@ -7106,6 +7125,7 @@ enum {
        ALC285_FIXUP_ASUS_G533Z_PINS,
        ALC285_FIXUP_HP_GPIO_LED,
        ALC285_FIXUP_HP_MUTE_LED,
+       ALC285_FIXUP_HP_SPECTRE_X360_MUTE_LED,
        ALC236_FIXUP_HP_GPIO_LED,
        ALC236_FIXUP_HP_MUTE_LED,
        ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF,
@@ -7175,6 +7195,7 @@ enum {
        ALC287_FIXUP_YOGA9_14IAP7_BASS_SPK,
        ALC287_FIXUP_YOGA9_14IAP7_BASS_SPK_PIN,
        ALC295_FIXUP_DELL_INSPIRON_TOP_SPEAKERS,
+       ALC236_FIXUP_DELL_DUAL_CODECS,
 };
 
 /* A special fixup for Lenovo C940 and Yoga Duet 7;
@@ -8485,6 +8506,10 @@ static const struct hda_fixup alc269_fixups[] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = alc285_fixup_hp_mute_led,
        },
+       [ALC285_FIXUP_HP_SPECTRE_X360_MUTE_LED] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = alc285_fixup_hp_spectre_x360_mute_led,
+       },
        [ALC236_FIXUP_HP_GPIO_LED] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = alc236_fixup_hp_gpio_led,
@@ -9130,6 +9155,12 @@ static const struct hda_fixup alc269_fixups[] = {
                .chained = true,
                .chain_id = ALC269_FIXUP_DELL4_MIC_NO_PRESENCE,
        },
+       [ALC236_FIXUP_DELL_DUAL_CODECS] = {
+               .type = HDA_FIXUP_PINS,
+               .v.func = alc1220_fixup_gb_dual_codecs,
+               .chained = true,
+               .chain_id = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+       },
 };
 
 static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -9232,6 +9263,13 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1028, 0x0b1a, "Dell Precision 5570", ALC289_FIXUP_DUAL_SPK),
        SND_PCI_QUIRK(0x1028, 0x0b37, "Dell Inspiron 16 Plus 7620 2-in-1", ALC295_FIXUP_DELL_INSPIRON_TOP_SPEAKERS),
        SND_PCI_QUIRK(0x1028, 0x0b71, "Dell Inspiron 16 Plus 7620", ALC295_FIXUP_DELL_INSPIRON_TOP_SPEAKERS),
+       SND_PCI_QUIRK(0x1028, 0x0c03, "Dell Precision 5340", ALC269_FIXUP_DELL4_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1028, 0x0c19, "Dell Precision 3340", ALC236_FIXUP_DELL_DUAL_CODECS),
+       SND_PCI_QUIRK(0x1028, 0x0c1a, "Dell Precision 3340", ALC236_FIXUP_DELL_DUAL_CODECS),
+       SND_PCI_QUIRK(0x1028, 0x0c1b, "Dell Precision 3440", ALC236_FIXUP_DELL_DUAL_CODECS),
+       SND_PCI_QUIRK(0x1028, 0x0c1c, "Dell Precision 3540", ALC236_FIXUP_DELL_DUAL_CODECS),
+       SND_PCI_QUIRK(0x1028, 0x0c1d, "Dell Precision 3440", ALC236_FIXUP_DELL_DUAL_CODECS),
+       SND_PCI_QUIRK(0x1028, 0x0c1e, "Dell Precision 3540", ALC236_FIXUP_DELL_DUAL_CODECS),
        SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
@@ -9314,6 +9352,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x103c, 0x86c7, "HP Envy AiO 32", ALC274_FIXUP_HP_ENVY_GPIO),
        SND_PCI_QUIRK(0x103c, 0x86e7, "HP Spectre x360 15-eb0xxx", ALC285_FIXUP_HP_SPECTRE_X360_EB1),
        SND_PCI_QUIRK(0x103c, 0x86e8, "HP Spectre x360 15-eb0xxx", ALC285_FIXUP_HP_SPECTRE_X360_EB1),
+       SND_PCI_QUIRK(0x103c, 0x86f9, "HP Spectre x360 13-aw0xxx", ALC285_FIXUP_HP_SPECTRE_X360_MUTE_LED),
        SND_PCI_QUIRK(0x103c, 0x8716, "HP Elite Dragonfly G2 Notebook PC", ALC285_FIXUP_HP_GPIO_AMP_INIT),
        SND_PCI_QUIRK(0x103c, 0x8720, "HP EliteBook x360 1040 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_AMP_INIT),
        SND_PCI_QUIRK(0x103c, 0x8724, "HP EliteBook 850 G7", ALC285_FIXUP_HP_GPIO_LED),
@@ -9393,6 +9432,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x103c, 0x8ad2, "HP EliteBook 860 16 inch G9 Notebook PC", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
        SND_PCI_QUIRK(0x103c, 0x8b5d, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
        SND_PCI_QUIRK(0x103c, 0x8b5e, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF),
+       SND_PCI_QUIRK(0x103c, 0x8bf0, "HP", ALC236_FIXUP_HP_GPIO_LED),
        SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
        SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
        SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
index 1f0b5527c59497bb542d94340c3b52126cfd0b3b..0d283e41f66dc0d8249e84f9e1f85a6b8ea12095 100644 (file)
@@ -206,6 +206,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "UM5302TA"),
                }
        },
+       {
+               .driver_data = &acp6x_card,
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK COMPUTER INC."),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "M5402RA"),
+               }
+       },
        {
                .driver_data = &acp6x_card,
                .matches = {
@@ -220,6 +227,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "Redmi Book Pro 14 2022"),
                }
        },
+       {
+               .driver_data = &acp6x_card,
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_VENDOR, "Razer"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Blade 14 (2022) - RZ09-0427"),
+               }
+       },
        {}
 };
 
index 644300e88b4c5e32c24992d2afe96a0e7bb216ce..fcf4fbaed3c76997ef91aec9642d9b1c409cbccb 100644 (file)
@@ -177,8 +177,20 @@ static int rt9120_codec_probe(struct snd_soc_component *comp)
        return 0;
 }
 
+static int rt9120_codec_suspend(struct snd_soc_component *comp)
+{
+       return pm_runtime_force_suspend(comp->dev);
+}
+
+static int rt9120_codec_resume(struct snd_soc_component *comp)
+{
+       return pm_runtime_force_resume(comp->dev);
+}
+
 static const struct snd_soc_component_driver rt9120_component_driver = {
        .probe = rt9120_codec_probe,
+       .suspend = rt9120_codec_suspend,
+       .resume = rt9120_codec_resume,
        .controls = rt9120_snd_controls,
        .num_controls = ARRAY_SIZE(rt9120_snd_controls),
        .dapm_widgets = rt9120_dapm_widgets,
index ca6a01a230af4a6ca056a033cd682b8547b61578..791d8738d1c0eca4f16120ee67c5607a9f061571 100644 (file)
@@ -697,6 +697,7 @@ static int out_pga_event(struct snd_soc_dapm_widget *w,
        int dcs_mask;
        int dcs_l, dcs_r;
        int dcs_l_reg, dcs_r_reg;
+       int an_out_reg;
        int timeout;
        int pwr_reg;
 
@@ -712,6 +713,7 @@ static int out_pga_event(struct snd_soc_dapm_widget *w,
                dcs_mask = WM8904_DCS_ENA_CHAN_0 | WM8904_DCS_ENA_CHAN_1;
                dcs_r_reg = WM8904_DC_SERVO_8;
                dcs_l_reg = WM8904_DC_SERVO_9;
+               an_out_reg = WM8904_ANALOGUE_OUT1_LEFT;
                dcs_l = 0;
                dcs_r = 1;
                break;
@@ -720,6 +722,7 @@ static int out_pga_event(struct snd_soc_dapm_widget *w,
                dcs_mask = WM8904_DCS_ENA_CHAN_2 | WM8904_DCS_ENA_CHAN_3;
                dcs_r_reg = WM8904_DC_SERVO_6;
                dcs_l_reg = WM8904_DC_SERVO_7;
+               an_out_reg = WM8904_ANALOGUE_OUT2_LEFT;
                dcs_l = 2;
                dcs_r = 3;
                break;
@@ -792,6 +795,10 @@ static int out_pga_event(struct snd_soc_dapm_widget *w,
                snd_soc_component_update_bits(component, reg,
                                    WM8904_HPL_ENA_OUTP | WM8904_HPR_ENA_OUTP,
                                    WM8904_HPL_ENA_OUTP | WM8904_HPR_ENA_OUTP);
+
+               /* Update volume, requires PGA to be powered */
+               val = snd_soc_component_read(component, an_out_reg);
+               snd_soc_component_write(component, an_out_reg, val);
                break;
 
        case SND_SOC_DAPM_POST_PMU:
index c836848ef0a65ecce8e6bf970fa48f6bcf91d33c..8d14b5593658d67e5039d2ed42a92b2d9c08044e 100644 (file)
@@ -121,11 +121,11 @@ static const struct snd_soc_dapm_route audio_map[] = {
 
 static const struct snd_soc_dapm_route audio_map_ac97[] = {
        /* 1st half -- Normal DAPM routes */
-       {"Playback",  NULL, "AC97 Playback"},
-       {"AC97 Capture",  NULL, "Capture"},
+       {"AC97 Playback",  NULL, "CPU AC97 Playback"},
+       {"CPU AC97 Capture",  NULL, "AC97 Capture"},
        /* 2nd half -- ASRC DAPM routes */
-       {"AC97 Playback",  NULL, "ASRC-Playback"},
-       {"ASRC-Capture",  NULL, "AC97 Capture"},
+       {"CPU AC97 Playback",  NULL, "ASRC-Playback"},
+       {"ASRC-Capture",  NULL, "CPU AC97 Capture"},
 };
 
 static const struct snd_soc_dapm_route audio_map_tx[] = {
index 7b17f152bbf3cb344f76a62e592299be8e5c016e..94341e4352b3cd35a4a9e2abe6bb3d994357a98b 100644 (file)
@@ -315,21 +315,21 @@ static int hwvad_detected(struct snd_kcontrol *kcontrol,
 
 static const struct snd_kcontrol_new fsl_micfil_snd_controls[] = {
        SOC_SINGLE_SX_TLV("CH0 Volume", REG_MICFIL_OUT_CTRL,
-                         MICFIL_OUTGAIN_CHX_SHIFT(0), 0xF, 0x7, gain_tlv),
+                         MICFIL_OUTGAIN_CHX_SHIFT(0), 0x8, 0xF, gain_tlv),
        SOC_SINGLE_SX_TLV("CH1 Volume", REG_MICFIL_OUT_CTRL,
-                         MICFIL_OUTGAIN_CHX_SHIFT(1), 0xF, 0x7, gain_tlv),
+                         MICFIL_OUTGAIN_CHX_SHIFT(1), 0x8, 0xF, gain_tlv),
        SOC_SINGLE_SX_TLV("CH2 Volume", REG_MICFIL_OUT_CTRL,
-                         MICFIL_OUTGAIN_CHX_SHIFT(2), 0xF, 0x7, gain_tlv),
+                         MICFIL_OUTGAIN_CHX_SHIFT(2), 0x8, 0xF, gain_tlv),
        SOC_SINGLE_SX_TLV("CH3 Volume", REG_MICFIL_OUT_CTRL,
-                         MICFIL_OUTGAIN_CHX_SHIFT(3), 0xF, 0x7, gain_tlv),
+                         MICFIL_OUTGAIN_CHX_SHIFT(3), 0x8, 0xF, gain_tlv),
        SOC_SINGLE_SX_TLV("CH4 Volume", REG_MICFIL_OUT_CTRL,
-                         MICFIL_OUTGAIN_CHX_SHIFT(4), 0xF, 0x7, gain_tlv),
+                         MICFIL_OUTGAIN_CHX_SHIFT(4), 0x8, 0xF, gain_tlv),
        SOC_SINGLE_SX_TLV("CH5 Volume", REG_MICFIL_OUT_CTRL,
-                         MICFIL_OUTGAIN_CHX_SHIFT(5), 0xF, 0x7, gain_tlv),
+                         MICFIL_OUTGAIN_CHX_SHIFT(5), 0x8, 0xF, gain_tlv),
        SOC_SINGLE_SX_TLV("CH6 Volume", REG_MICFIL_OUT_CTRL,
-                         MICFIL_OUTGAIN_CHX_SHIFT(6), 0xF, 0x7, gain_tlv),
+                         MICFIL_OUTGAIN_CHX_SHIFT(6), 0x8, 0xF, gain_tlv),
        SOC_SINGLE_SX_TLV("CH7 Volume", REG_MICFIL_OUT_CTRL,
-                         MICFIL_OUTGAIN_CHX_SHIFT(7), 0xF, 0x7, gain_tlv),
+                         MICFIL_OUTGAIN_CHX_SHIFT(7), 0x8, 0xF, gain_tlv),
        SOC_ENUM_EXT("MICFIL Quality Select",
                     fsl_micfil_quality_enum,
                     micfil_quality_get, micfil_quality_set),
index c9e0e31d5b34d43db672c2aaee3d461dc3c247c1..46a53551b955c6c33415df20d2fcb8f576e90a43 100644 (file)
@@ -1189,14 +1189,14 @@ static struct snd_soc_dai_driver fsl_ssi_ac97_dai = {
        .symmetric_channels = 1,
        .probe = fsl_ssi_dai_probe,
        .playback = {
-               .stream_name = "AC97 Playback",
+               .stream_name = "CPU AC97 Playback",
                .channels_min = 2,
                .channels_max = 2,
                .rates = SNDRV_PCM_RATE_8000_48000,
                .formats = SNDRV_PCM_FMTBIT_S16 | SNDRV_PCM_FMTBIT_S20,
        },
        .capture = {
-               .stream_name = "AC97 Capture",
+               .stream_name = "CPU AC97 Capture",
                .channels_min = 2,
                .channels_max = 2,
                .rates = SNDRV_PCM_RATE_48000,
index a472de1909f4ba05bf5c139528708335cc39cb60..99308ed85277b8abd85685d0dd33995777ac5762 100644 (file)
@@ -554,10 +554,12 @@ config SND_SOC_INTEL_SOF_NAU8825_MACH
        select SND_SOC_RT1015P
        select SND_SOC_MAX98373_I2C
        select SND_SOC_MAX98357A
+       select SND_SOC_NAU8315
        select SND_SOC_DMIC
        select SND_SOC_HDAC_HDMI
        select SND_SOC_INTEL_HDA_DSP_COMMON
        select SND_SOC_INTEL_SOF_MAXIM_COMMON
+       select SND_SOC_INTEL_SOF_REALTEK_COMMON
        help
           This adds support for ASoC machine driver for SOF platforms
           with nau8825 codec.
index 27880224359d5be790d4aaa16be22e78b749197e..a800854c2831a130d1d549be3d7fe0e521f99202 100644 (file)
@@ -48,6 +48,7 @@
 #define SOF_MAX98373_SPEAKER_AMP_PRESENT       BIT(15)
 #define SOF_MAX98360A_SPEAKER_AMP_PRESENT      BIT(16)
 #define SOF_RT1015P_SPEAKER_AMP_PRESENT        BIT(17)
+#define SOF_NAU8318_SPEAKER_AMP_PRESENT        BIT(18)
 
 static unsigned long sof_nau8825_quirk = SOF_NAU8825_SSP_CODEC(0);
 
@@ -338,6 +339,13 @@ static struct snd_soc_dai_link_component rt1019p_component[] = {
        }
 };
 
+static struct snd_soc_dai_link_component nau8318_components[] = {
+       {
+               .name = "NVTN2012:00",
+               .dai_name = "nau8315-hifi",
+       }
+};
+
 static struct snd_soc_dai_link_component dummy_component[] = {
        {
                .name = "snd-soc-dummy",
@@ -486,6 +494,11 @@ static struct snd_soc_dai_link *sof_card_dai_links_create(struct device *dev,
                        max_98360a_dai_link(&links[id]);
                } else if (sof_nau8825_quirk & SOF_RT1015P_SPEAKER_AMP_PRESENT) {
                        sof_rt1015p_dai_link(&links[id]);
+               } else if (sof_nau8825_quirk &
+                               SOF_NAU8318_SPEAKER_AMP_PRESENT) {
+                       links[id].codecs = nau8318_components;
+                       links[id].num_codecs = ARRAY_SIZE(nau8318_components);
+                       links[id].init = speaker_codec_init;
                } else {
                        goto devm_err;
                }
@@ -618,7 +631,7 @@ static const struct platform_device_id board_ids[] = {
 
        },
        {
-               .name = "adl_rt1019p_nau8825",
+               .name = "adl_rt1019p_8825",
                .driver_data = (kernel_ulong_t)(SOF_NAU8825_SSP_CODEC(0) |
                                        SOF_SPEAKER_AMP_PRESENT |
                                        SOF_RT1019P_SPEAKER_AMP_PRESENT |
@@ -626,7 +639,7 @@ static const struct platform_device_id board_ids[] = {
                                        SOF_NAU8825_NUM_HDMIDEV(4)),
        },
        {
-               .name = "adl_max98373_nau8825",
+               .name = "adl_max98373_8825",
                .driver_data = (kernel_ulong_t)(SOF_NAU8825_SSP_CODEC(0) |
                                        SOF_SPEAKER_AMP_PRESENT |
                                        SOF_MAX98373_SPEAKER_AMP_PRESENT |
@@ -637,7 +650,7 @@ static const struct platform_device_id board_ids[] = {
        },
        {
                /* The limitation of length of char array, shorten the name */
-               .name = "adl_mx98360a_nau8825",
+               .name = "adl_mx98360a_8825",
                .driver_data = (kernel_ulong_t)(SOF_NAU8825_SSP_CODEC(0) |
                                        SOF_SPEAKER_AMP_PRESENT |
                                        SOF_MAX98360A_SPEAKER_AMP_PRESENT |
@@ -648,7 +661,7 @@ static const struct platform_device_id board_ids[] = {
 
        },
        {
-               .name = "adl_rt1015p_nau8825",
+               .name = "adl_rt1015p_8825",
                .driver_data = (kernel_ulong_t)(SOF_NAU8825_SSP_CODEC(0) |
                                        SOF_SPEAKER_AMP_PRESENT |
                                        SOF_RT1015P_SPEAKER_AMP_PRESENT |
@@ -657,6 +670,16 @@ static const struct platform_device_id board_ids[] = {
                                        SOF_BT_OFFLOAD_SSP(2) |
                                        SOF_SSP_BT_OFFLOAD_PRESENT),
        },
+       {
+               .name = "adl_nau8318_8825",
+               .driver_data = (kernel_ulong_t)(SOF_NAU8825_SSP_CODEC(0) |
+                                       SOF_SPEAKER_AMP_PRESENT |
+                                       SOF_NAU8318_SPEAKER_AMP_PRESENT |
+                                       SOF_NAU8825_SSP_AMP(1) |
+                                       SOF_NAU8825_NUM_HDMIDEV(4) |
+                                       SOF_BT_OFFLOAD_SSP(2) |
+                                       SOF_SSP_BT_OFFLOAD_PRESENT),
+       },
        { }
 };
 MODULE_DEVICE_TABLE(platform, board_ids);
index 60aee56f94bd43cb06c6160c4c7dbe6a2c4fd83a..56ee5fef66a8be93b1eafdb210b698d9aa0c1ec8 100644 (file)
@@ -450,6 +450,11 @@ static const struct snd_soc_acpi_codecs adl_lt6911_hdmi = {
        .codecs = {"INTC10B0"}
 };
 
+static const struct snd_soc_acpi_codecs adl_nau8318_amp = {
+       .num_codecs = 1,
+       .codecs = {"NVTN2012"}
+};
+
 struct snd_soc_acpi_mach snd_soc_acpi_intel_adl_machines[] = {
        {
                .comp_ids = &adl_rt5682_rt5682s_hp,
@@ -474,21 +479,21 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_adl_machines[] = {
        },
        {
                .id = "10508825",
-               .drv_name = "adl_rt1019p_nau8825",
+               .drv_name = "adl_rt1019p_8825",
                .machine_quirk = snd_soc_acpi_codec_list,
                .quirk_data = &adl_rt1019p_amp,
                .sof_tplg_filename = "sof-adl-rt1019-nau8825.tplg",
        },
        {
                .id = "10508825",
-               .drv_name = "adl_max98373_nau8825",
+               .drv_name = "adl_max98373_8825",
                .machine_quirk = snd_soc_acpi_codec_list,
                .quirk_data = &adl_max98373_amp,
                .sof_tplg_filename = "sof-adl-max98373-nau8825.tplg",
        },
        {
                .id = "10508825",
-               .drv_name = "adl_mx98360a_nau8825",
+               .drv_name = "adl_mx98360a_8825",
                .machine_quirk = snd_soc_acpi_codec_list,
                .quirk_data = &adl_max98360a_amp,
                .sof_tplg_filename = "sof-adl-max98360a-nau8825.tplg",
@@ -502,11 +507,18 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_adl_machines[] = {
        },
        {
                .id = "10508825",
-               .drv_name = "adl_rt1015p_nau8825",
+               .drv_name = "adl_rt1015p_8825",
                .machine_quirk = snd_soc_acpi_codec_list,
                .quirk_data = &adl_rt1015p_amp,
                .sof_tplg_filename = "sof-adl-rt1015-nau8825.tplg",
        },
+       {
+               .id = "10508825",
+               .drv_name = "adl_nau8318_8825",
+               .machine_quirk = snd_soc_acpi_codec_list,
+               .quirk_data = &adl_nau8318_amp,
+               .sof_tplg_filename = "sof-adl-nau8318-nau8825.tplg",
+       },
        {
                .id = "10508825",
                .drv_name = "sof_nau8825",
index 31b43116e3d88fec2ab64f141e71a33b57c016a0..07f96a11ea2f971aa7ec7159d7b7fe00529b5d93 100644 (file)
@@ -203,6 +203,25 @@ static const struct snd_soc_acpi_link_adr rpl_sdw_rt711_link2_rt1316_link01_rt71
        {}
 };
 
+static const struct snd_soc_acpi_link_adr rpl_sdw_rt711_link2_rt1316_link01[] = {
+       {
+               .mask = BIT(2),
+               .num_adr = ARRAY_SIZE(rt711_sdca_2_adr),
+               .adr_d = rt711_sdca_2_adr,
+       },
+       {
+               .mask = BIT(0),
+               .num_adr = ARRAY_SIZE(rt1316_0_group2_adr),
+               .adr_d = rt1316_0_group2_adr,
+       },
+       {
+               .mask = BIT(1),
+               .num_adr = ARRAY_SIZE(rt1316_1_group2_adr),
+               .adr_d = rt1316_1_group2_adr,
+       },
+       {}
+};
+
 static const struct snd_soc_acpi_link_adr rpl_sdw_rt711_link0_rt1318_link12_rt714_link3[] = {
        {
                .mask = BIT(0),
@@ -227,6 +246,25 @@ static const struct snd_soc_acpi_link_adr rpl_sdw_rt711_link0_rt1318_link12_rt71
        {}
 };
 
+static const struct snd_soc_acpi_link_adr rpl_sdw_rt711_link0_rt1318_link12[] = {
+       {
+               .mask = BIT(0),
+               .num_adr = ARRAY_SIZE(rt711_sdca_0_adr),
+               .adr_d = rt711_sdca_0_adr,
+       },
+       {
+               .mask = BIT(1),
+               .num_adr = ARRAY_SIZE(rt1318_1_group1_adr),
+               .adr_d = rt1318_1_group1_adr,
+       },
+       {
+               .mask = BIT(2),
+               .num_adr = ARRAY_SIZE(rt1318_2_group1_adr),
+               .adr_d = rt1318_2_group1_adr,
+       },
+       {}
+};
+
 static const struct snd_soc_acpi_link_adr rpl_sdw_rt1316_link12_rt714_link0[] = {
        {
                .mask = BIT(1),
@@ -271,12 +309,24 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_rpl_sdw_machines[] = {
                .drv_name = "sof_sdw",
                .sof_tplg_filename = "sof-rpl-rt711-l0-rt1318-l12-rt714-l3.tplg",
        },
+       {
+               .link_mask = 0x7, /* rt711 on link0 & two rt1318s on link1 and link2 */
+               .links = rpl_sdw_rt711_link0_rt1318_link12,
+               .drv_name = "sof_sdw",
+               .sof_tplg_filename = "sof-rpl-rt711-l0-rt1318-l12.tplg",
+       },
        {
                .link_mask = 0x7, /* rt714 on link0 & two rt1316s on link1 and link2 */
                .links = rpl_sdw_rt1316_link12_rt714_link0,
                .drv_name = "sof_sdw",
                .sof_tplg_filename = "sof-rpl-rt1316-l12-rt714-l0.tplg",
        },
+       {
+               .link_mask = 0x7, /* rt711 on link2 & two rt1316s on link0 and link1 */
+               .links = rpl_sdw_rt711_link2_rt1316_link01,
+               .drv_name = "sof_sdw",
+               .sof_tplg_filename = "sof-rpl-rt711-l2-rt1316-l01.tplg",
+       },
        {
                .link_mask = 0x1, /* link0 required */
                .links = rpl_rvp,
index 363fa4d476800fffdf2e3d6fdab753fd80076091..b027fba8233df6cc38c6b06067926d5f6f315176 100644 (file)
@@ -182,10 +182,12 @@ config SND_SOC_MT8186_MT6366_DA7219_MAX98357
          If unsure select "N".
 
 config SND_SOC_MT8186_MT6366_RT1019_RT5682S
-       tristate "ASoC Audio driver for MT8186 with RT1019 RT5682S codec"
+       tristate "ASoC Audio driver for MT8186 with RT1019 RT5682S MAX98357A/MAX98360 codec"
        depends on I2C && GPIOLIB
        depends on SND_SOC_MT8186 && MTK_PMIC_WRAP
+       select SND_SOC_MAX98357A
        select SND_SOC_MT6358
+       select SND_SOC_MAX98357A
        select SND_SOC_RT1015P
        select SND_SOC_RT5682S
        select SND_SOC_BT_SCO
index 8f77a0bc1dc8d7173e24773f19e26f5b82579fab..af44e331dae8241186795879a6208d3a48ac5bae 100644 (file)
@@ -1083,6 +1083,21 @@ static struct snd_soc_card mt8186_mt6366_rt1019_rt5682s_soc_card = {
        .num_configs = ARRAY_SIZE(mt8186_mt6366_rt1019_rt5682s_codec_conf),
 };
 
+static struct snd_soc_card mt8186_mt6366_rt5682s_max98360_soc_card = {
+       .name = "mt8186_rt5682s_max98360",
+       .owner = THIS_MODULE,
+       .dai_link = mt8186_mt6366_rt1019_rt5682s_dai_links,
+       .num_links = ARRAY_SIZE(mt8186_mt6366_rt1019_rt5682s_dai_links),
+       .controls = mt8186_mt6366_rt1019_rt5682s_controls,
+       .num_controls = ARRAY_SIZE(mt8186_mt6366_rt1019_rt5682s_controls),
+       .dapm_widgets = mt8186_mt6366_rt1019_rt5682s_widgets,
+       .num_dapm_widgets = ARRAY_SIZE(mt8186_mt6366_rt1019_rt5682s_widgets),
+       .dapm_routes = mt8186_mt6366_rt1019_rt5682s_routes,
+       .num_dapm_routes = ARRAY_SIZE(mt8186_mt6366_rt1019_rt5682s_routes),
+       .codec_conf = mt8186_mt6366_rt1019_rt5682s_codec_conf,
+       .num_configs = ARRAY_SIZE(mt8186_mt6366_rt1019_rt5682s_codec_conf),
+};
+
 static int mt8186_mt6366_rt1019_rt5682s_dev_probe(struct platform_device *pdev)
 {
        struct snd_soc_card *card;
@@ -1232,9 +1247,14 @@ err_adsp_node:
 
 #if IS_ENABLED(CONFIG_OF)
 static const struct of_device_id mt8186_mt6366_rt1019_rt5682s_dt_match[] = {
-       {       .compatible = "mediatek,mt8186-mt6366-rt1019-rt5682s-sound",
+       {
+               .compatible = "mediatek,mt8186-mt6366-rt1019-rt5682s-sound",
                .data = &mt8186_mt6366_rt1019_rt5682s_soc_card,
        },
+       {
+               .compatible = "mediatek,mt8186-mt6366-rt5682s-max98360-sound",
+               .data = &mt8186_mt6366_rt5682s_max98360_soc_card,
+       },
        {}
 };
 MODULE_DEVICE_TABLE(of, mt8186_mt6366_rt1019_rt5682s_dt_match);
index 96a6d4731e6fd9f92f0464efffb302dfd91dc6ae..e7b00d1d9e99fddef1aaa2859388468b00ff59a1 100644 (file)
@@ -2,7 +2,6 @@
 menuconfig SND_SOC_QCOM
        tristate "ASoC support for QCOM platforms"
        depends on ARCH_QCOM || COMPILE_TEST
-       imply SND_SOC_QCOM_COMMON
        help
          Say Y or M if you want to add support to use audio devices
          in Qualcomm Technologies SOC-based platforms.
@@ -60,14 +59,16 @@ config SND_SOC_STORM
 config SND_SOC_APQ8016_SBC
        tristate "SoC Audio support for APQ8016 SBC platforms"
        select SND_SOC_LPASS_APQ8016
-       depends on SND_SOC_QCOM_COMMON
+       select SND_SOC_QCOM_COMMON
        help
          Support for Qualcomm Technologies LPASS audio block in
          APQ8016 SOC-based systems.
          Say Y if you want to use audio devices on MI2S.
 
 config SND_SOC_QCOM_COMMON
-       depends on SOUNDWIRE
+       tristate
+
+config SND_SOC_QCOM_SDW
        tristate
 
 config SND_SOC_QDSP6_COMMON
@@ -144,7 +145,7 @@ config SND_SOC_MSM8996
        depends on QCOM_APR
        depends on COMMON_CLK
        select SND_SOC_QDSP6
-       depends on SND_SOC_QCOM_COMMON
+       select SND_SOC_QCOM_COMMON
        help
          Support for Qualcomm Technologies LPASS audio block in
          APQ8096 SoC-based systems.
@@ -155,7 +156,7 @@ config SND_SOC_SDM845
        depends on QCOM_APR && I2C && SOUNDWIRE
        depends on COMMON_CLK
        select SND_SOC_QDSP6
-       depends on SND_SOC_QCOM_COMMON
+       select SND_SOC_QCOM_COMMON
        select SND_SOC_RT5663
        select SND_SOC_MAX98927
        imply SND_SOC_CROS_EC_CODEC
@@ -169,7 +170,8 @@ config SND_SOC_SM8250
        depends on QCOM_APR && SOUNDWIRE
        depends on COMMON_CLK
        select SND_SOC_QDSP6
-       depends on SND_SOC_QCOM_COMMON
+       select SND_SOC_QCOM_COMMON
+       select SND_SOC_QCOM_SDW
        help
          To add support for audio on Qualcomm Technologies Inc.
          SM8250 SoC-based systems.
@@ -180,7 +182,8 @@ config SND_SOC_SC8280XP
        depends on QCOM_APR && SOUNDWIRE
        depends on COMMON_CLK
        select SND_SOC_QDSP6
-       depends on SND_SOC_QCOM_COMMON
+       select SND_SOC_QCOM_COMMON
+       select SND_SOC_QCOM_SDW
        help
          To add support for audio on Qualcomm Technologies Inc.
          SC8280XP SoC-based systems.
@@ -190,7 +193,7 @@ config SND_SOC_SC7180
        tristate "SoC Machine driver for SC7180 boards"
        depends on I2C && GPIOLIB
        depends on SOUNDWIRE || SOUNDWIRE=n
-       depends on SND_SOC_QCOM_COMMON
+       select SND_SOC_QCOM_COMMON
        select SND_SOC_LPASS_SC7180
        select SND_SOC_MAX98357A
        select SND_SOC_RT5682_I2C
@@ -204,7 +207,7 @@ config SND_SOC_SC7180
 config SND_SOC_SC7280
        tristate "SoC Machine driver for SC7280 boards"
        depends on I2C && SOUNDWIRE
-       depends on SND_SOC_QCOM_COMMON
+       select SND_SOC_QCOM_COMMON
        select SND_SOC_LPASS_SC7280
        select SND_SOC_MAX98357A
        select SND_SOC_WCD938X_SDW
index 8b97172cf990fb0d825944214372c31ced9a56d3..254350d9dc069c6d3c9de4650c2470e5030c42fe 100644 (file)
@@ -28,6 +28,7 @@ snd-soc-sdm845-objs := sdm845.o
 snd-soc-sm8250-objs := sm8250.o
 snd-soc-sc8280xp-objs := sc8280xp.o
 snd-soc-qcom-common-objs := common.o
+snd-soc-qcom-sdw-objs := sdw.o
 
 obj-$(CONFIG_SND_SOC_STORM) += snd-soc-storm.o
 obj-$(CONFIG_SND_SOC_APQ8016_SBC) += snd-soc-apq8016-sbc.o
@@ -38,6 +39,7 @@ obj-$(CONFIG_SND_SOC_SC8280XP) += snd-soc-sc8280xp.o
 obj-$(CONFIG_SND_SOC_SDM845) += snd-soc-sdm845.o
 obj-$(CONFIG_SND_SOC_SM8250) += snd-soc-sm8250.o
 obj-$(CONFIG_SND_SOC_QCOM_COMMON) += snd-soc-qcom-common.o
+obj-$(CONFIG_SND_SOC_QCOM_SDW) += snd-soc-qcom-sdw.o
 
 #DSP lib
 obj-$(CONFIG_SND_SOC_QDSP6) += qdsp6/
index 49c74c1662a3f240bd45d8fa7e21011827932e49..96fe80241fb417d7eda94db912e3dbd8a9ac7a53 100644 (file)
@@ -180,120 +180,6 @@ err_put_np:
 }
 EXPORT_SYMBOL_GPL(qcom_snd_parse_of);
 
-int qcom_snd_sdw_prepare(struct snd_pcm_substream *substream,
-                        struct sdw_stream_runtime *sruntime,
-                        bool *stream_prepared)
-{
-       struct snd_soc_pcm_runtime *rtd = substream->private_data;
-       struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
-       int ret;
-
-       if (!sruntime)
-               return 0;
-
-       switch (cpu_dai->id) {
-       case WSA_CODEC_DMA_RX_0:
-       case WSA_CODEC_DMA_RX_1:
-       case RX_CODEC_DMA_RX_0:
-       case RX_CODEC_DMA_RX_1:
-       case TX_CODEC_DMA_TX_0:
-       case TX_CODEC_DMA_TX_1:
-       case TX_CODEC_DMA_TX_2:
-       case TX_CODEC_DMA_TX_3:
-               break;
-       default:
-               return 0;
-       }
-
-       if (*stream_prepared) {
-               sdw_disable_stream(sruntime);
-               sdw_deprepare_stream(sruntime);
-               *stream_prepared = false;
-       }
-
-       ret = sdw_prepare_stream(sruntime);
-       if (ret)
-               return ret;
-
-       /**
-        * NOTE: there is a strict hw requirement about the ordering of port
-        * enables and actual WSA881x PA enable. PA enable should only happen
-        * after soundwire ports are enabled if not DC on the line is
-        * accumulated resulting in Click/Pop Noise
-        * PA enable/mute are handled as part of codec DAPM and digital mute.
-        */
-
-       ret = sdw_enable_stream(sruntime);
-       if (ret) {
-               sdw_deprepare_stream(sruntime);
-               return ret;
-       }
-       *stream_prepared  = true;
-
-       return ret;
-}
-EXPORT_SYMBOL_GPL(qcom_snd_sdw_prepare);
-
-int qcom_snd_sdw_hw_params(struct snd_pcm_substream *substream,
-                          struct snd_pcm_hw_params *params,
-                          struct sdw_stream_runtime **psruntime)
-{
-       struct snd_soc_pcm_runtime *rtd = substream->private_data;
-       struct snd_soc_dai *codec_dai;
-       struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
-       struct sdw_stream_runtime *sruntime;
-       int i;
-
-       switch (cpu_dai->id) {
-       case WSA_CODEC_DMA_RX_0:
-       case RX_CODEC_DMA_RX_0:
-       case RX_CODEC_DMA_RX_1:
-       case TX_CODEC_DMA_TX_0:
-       case TX_CODEC_DMA_TX_1:
-       case TX_CODEC_DMA_TX_2:
-       case TX_CODEC_DMA_TX_3:
-               for_each_rtd_codec_dais(rtd, i, codec_dai) {
-                       sruntime = snd_soc_dai_get_stream(codec_dai, substream->stream);
-                       if (sruntime != ERR_PTR(-ENOTSUPP))
-                               *psruntime = sruntime;
-               }
-               break;
-       }
-
-       return 0;
-
-}
-EXPORT_SYMBOL_GPL(qcom_snd_sdw_hw_params);
-
-int qcom_snd_sdw_hw_free(struct snd_pcm_substream *substream,
-                        struct sdw_stream_runtime *sruntime, bool *stream_prepared)
-{
-       struct snd_soc_pcm_runtime *rtd = substream->private_data;
-       struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
-
-       switch (cpu_dai->id) {
-       case WSA_CODEC_DMA_RX_0:
-       case WSA_CODEC_DMA_RX_1:
-       case RX_CODEC_DMA_RX_0:
-       case RX_CODEC_DMA_RX_1:
-       case TX_CODEC_DMA_TX_0:
-       case TX_CODEC_DMA_TX_1:
-       case TX_CODEC_DMA_TX_2:
-       case TX_CODEC_DMA_TX_3:
-               if (sruntime && *stream_prepared) {
-                       sdw_disable_stream(sruntime);
-                       sdw_deprepare_stream(sruntime);
-                       *stream_prepared = false;
-               }
-               break;
-       default:
-               break;
-       }
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(qcom_snd_sdw_hw_free);
-
 int qcom_snd_wcd_jack_setup(struct snd_soc_pcm_runtime *rtd,
                            struct snd_soc_jack *jack, bool *jack_setup)
 {
index 3ef5bb6d12df733e560e65fb3687d1e6edead703..d7f80ee5ae26aed26c20f8d751d96cdbce224550 100644 (file)
@@ -5,19 +5,9 @@
 #define __QCOM_SND_COMMON_H__
 
 #include <sound/soc.h>
-#include <linux/soundwire/sdw.h>
 
 int qcom_snd_parse_of(struct snd_soc_card *card);
 int qcom_snd_wcd_jack_setup(struct snd_soc_pcm_runtime *rtd,
                            struct snd_soc_jack *jack, bool *jack_setup);
 
-int qcom_snd_sdw_prepare(struct snd_pcm_substream *substream,
-                        struct sdw_stream_runtime *runtime,
-                        bool *stream_prepared);
-int qcom_snd_sdw_hw_params(struct snd_pcm_substream *substream,
-                          struct snd_pcm_hw_params *params,
-                          struct sdw_stream_runtime **psruntime);
-int qcom_snd_sdw_hw_free(struct snd_pcm_substream *substream,
-                        struct sdw_stream_runtime *sruntime,
-                        bool *stream_prepared);
 #endif
index 54353842dc07f01a7097e8ec865ba8b00e28d706..dbdaaa85ce48113b740f541f21a1181d015e535a 100644 (file)
@@ -1037,10 +1037,11 @@ static void of_lpass_cpu_parse_dai_data(struct device *dev,
                                        struct lpass_data *data)
 {
        struct device_node *node;
-       int ret, id;
+       int ret, i, id;
 
        /* Allow all channels by default for backwards compatibility */
-       for (id = 0; id < data->variant->num_dai; id++) {
+       for (i = 0; i < data->variant->num_dai; i++) {
+               id = data->variant->dai_driver[i].id;
                data->mi2s_playback_sd_mode[id] = LPAIF_I2SCTL_MODE_8CH;
                data->mi2s_capture_sd_mode[id] = LPAIF_I2SCTL_MODE_8CH;
        }
index ade44ad7c585a3d1a5a7c593eddf2c08c89730db..14d9fea33d16ab10253ab132abd6818a564c8bd0 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/input-event-codes.h>
 #include "qdsp6/q6afe.h"
 #include "common.h"
+#include "sdw.h"
 
 #define DRIVER_NAME            "sc8280xp"
 
diff --git a/sound/soc/qcom/sdw.c b/sound/soc/qcom/sdw.c
new file mode 100644 (file)
index 0000000..1024951
--- /dev/null
@@ -0,0 +1,123 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2018, Linaro Limited.
+// Copyright (c) 2018, The Linux Foundation. All rights reserved.
+
+#include <linux/module.h>
+#include <sound/soc.h>
+#include "qdsp6/q6afe.h"
+#include "sdw.h"
+
+int qcom_snd_sdw_prepare(struct snd_pcm_substream *substream,
+                        struct sdw_stream_runtime *sruntime,
+                        bool *stream_prepared)
+{
+       struct snd_soc_pcm_runtime *rtd = substream->private_data;
+       struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
+       int ret;
+
+       if (!sruntime)
+               return 0;
+
+       switch (cpu_dai->id) {
+       case WSA_CODEC_DMA_RX_0:
+       case WSA_CODEC_DMA_RX_1:
+       case RX_CODEC_DMA_RX_0:
+       case RX_CODEC_DMA_RX_1:
+       case TX_CODEC_DMA_TX_0:
+       case TX_CODEC_DMA_TX_1:
+       case TX_CODEC_DMA_TX_2:
+       case TX_CODEC_DMA_TX_3:
+               break;
+       default:
+               return 0;
+       }
+
+       if (*stream_prepared) {
+               sdw_disable_stream(sruntime);
+               sdw_deprepare_stream(sruntime);
+               *stream_prepared = false;
+       }
+
+       ret = sdw_prepare_stream(sruntime);
+       if (ret)
+               return ret;
+
+       /*
+        * NOTE: there is a strict hw requirement about the ordering of port
+        * enables and actual WSA881x PA enable. PA enable should only happen
+        * after soundwire ports are enabled; otherwise DC on the line is
+        * accumulated, resulting in click/pop noise.
+        * PA enable/mute are handled as part of codec DAPM and digital mute.
+        */
+
+       ret = sdw_enable_stream(sruntime);
+       if (ret) {
+               sdw_deprepare_stream(sruntime);
+               return ret;
+       }
+       *stream_prepared  = true;
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(qcom_snd_sdw_prepare);
+
+int qcom_snd_sdw_hw_params(struct snd_pcm_substream *substream,
+                          struct snd_pcm_hw_params *params,
+                          struct sdw_stream_runtime **psruntime)
+{
+       struct snd_soc_pcm_runtime *rtd = substream->private_data;
+       struct snd_soc_dai *codec_dai;
+       struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
+       struct sdw_stream_runtime *sruntime;
+       int i;
+
+       switch (cpu_dai->id) {
+       case WSA_CODEC_DMA_RX_0:
+       case RX_CODEC_DMA_RX_0:
+       case RX_CODEC_DMA_RX_1:
+       case TX_CODEC_DMA_TX_0:
+       case TX_CODEC_DMA_TX_1:
+       case TX_CODEC_DMA_TX_2:
+       case TX_CODEC_DMA_TX_3:
+               for_each_rtd_codec_dais(rtd, i, codec_dai) {
+                       sruntime = snd_soc_dai_get_stream(codec_dai, substream->stream);
+                       if (sruntime != ERR_PTR(-ENOTSUPP))
+                               *psruntime = sruntime;
+               }
+               break;
+       }
+
+       return 0;
+
+}
+EXPORT_SYMBOL_GPL(qcom_snd_sdw_hw_params);
+
+int qcom_snd_sdw_hw_free(struct snd_pcm_substream *substream,
+                        struct sdw_stream_runtime *sruntime, bool *stream_prepared)
+{
+       struct snd_soc_pcm_runtime *rtd = substream->private_data;
+       struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
+
+       switch (cpu_dai->id) {
+       case WSA_CODEC_DMA_RX_0:
+       case WSA_CODEC_DMA_RX_1:
+       case RX_CODEC_DMA_RX_0:
+       case RX_CODEC_DMA_RX_1:
+       case TX_CODEC_DMA_TX_0:
+       case TX_CODEC_DMA_TX_1:
+       case TX_CODEC_DMA_TX_2:
+       case TX_CODEC_DMA_TX_3:
+               if (sruntime && *stream_prepared) {
+                       sdw_disable_stream(sruntime);
+                       sdw_deprepare_stream(sruntime);
+                       *stream_prepared = false;
+               }
+               break;
+       default:
+               break;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(qcom_snd_sdw_hw_free);
+MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/qcom/sdw.h b/sound/soc/qcom/sdw.h
new file mode 100644 (file)
index 0000000..d74cbb8
--- /dev/null
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+// Copyright (c) 2018, The Linux Foundation. All rights reserved.
+
+#ifndef __QCOM_SND_SDW_H__
+#define __QCOM_SND_SDW_H__
+
+#include <linux/soundwire/sdw.h>
+
+int qcom_snd_sdw_prepare(struct snd_pcm_substream *substream,
+                        struct sdw_stream_runtime *runtime,
+                        bool *stream_prepared);
+int qcom_snd_sdw_hw_params(struct snd_pcm_substream *substream,
+                          struct snd_pcm_hw_params *params,
+                          struct sdw_stream_runtime **psruntime);
+int qcom_snd_sdw_hw_free(struct snd_pcm_substream *substream,
+                        struct sdw_stream_runtime *sruntime,
+                        bool *stream_prepared);
+#endif
index 8dbe9ef41b1c9e463b94a491e7ebb2e33ce1dd2f..9626a9ef78c23346d30429589a0d439ff83d1fa0 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/input-event-codes.h>
 #include "qdsp6/q6afe.h"
 #include "common.h"
+#include "sdw.h"
 
 #define DRIVER_NAME            "sm8250"
 #define MI2S_BCLK_RATE         1536000
index d9a3ce7b69e16cf03b73adc64d93a61293aed724..ade0507328af4bf9c7d6be10cbdd433fc13ac091 100644 (file)
@@ -353,7 +353,9 @@ int snd_sof_dbg_init(struct snd_sof_dev *sdev)
                        return err;
        }
 
-       return 0;
+       return snd_sof_debugfs_buf_item(sdev, &sdev->fw_state,
+                                       sizeof(sdev->fw_state),
+                                       "fw_state", 0444);
 }
 EXPORT_SYMBOL_GPL(snd_sof_dbg_init);
 
index df740be645e844f143e754d351b4561f7d2d57f3..8722bbd7fd3d7810cfd8effe9018ec9fce12da1d 100644 (file)
@@ -182,7 +182,7 @@ static int sof_suspend(struct device *dev, bool runtime_suspend)
        const struct sof_ipc_pm_ops *pm_ops = sdev->ipc->ops->pm;
        const struct sof_ipc_tplg_ops *tplg_ops = sdev->ipc->ops->tplg;
        pm_message_t pm_state;
-       u32 target_state = 0;
+       u32 target_state = snd_sof_dsp_power_target(sdev);
        int ret;
 
        /* do nothing if dsp suspend callback is not set */
@@ -192,6 +192,9 @@ static int sof_suspend(struct device *dev, bool runtime_suspend)
        if (runtime_suspend && !sof_ops(sdev)->runtime_suspend)
                return 0;
 
+       if (tplg_ops && tplg_ops->tear_down_all_pipelines)
+               tplg_ops->tear_down_all_pipelines(sdev, false);
+
        if (sdev->fw_state != SOF_FW_BOOT_COMPLETE)
                goto suspend;
 
@@ -206,7 +209,6 @@ static int sof_suspend(struct device *dev, bool runtime_suspend)
                }
        }
 
-       target_state = snd_sof_dsp_power_target(sdev);
        pm_state.event = target_state;
 
        /* Skip to platform-specific suspend if DSP is entering D0 */
@@ -217,9 +219,6 @@ static int sof_suspend(struct device *dev, bool runtime_suspend)
                goto suspend;
        }
 
-       if (tplg_ops->tear_down_all_pipelines)
-               tplg_ops->tear_down_all_pipelines(sdev, false);
-
        /* suspend DMA trace */
        sof_fw_trace_suspend(sdev, pm_state);
 
index 41ac7185b42b688a207bb8ca89d4832d0160d63c..4727043fd74580de6464e3076df0e997285a570e 100644 (file)
@@ -471,7 +471,7 @@ snd_usb_find_implicit_fb_sync_format(struct snd_usb_audio *chip,
        subs = find_matching_substream(chip, stream, target->sync_ep,
                                       target->fmt_type);
        if (!subs)
-               return sync_fmt;
+               goto end;
 
        high_score = 0;
        list_for_each_entry(fp, &subs->fmt_list, list) {
@@ -485,6 +485,7 @@ snd_usb_find_implicit_fb_sync_format(struct snd_usb_audio *chip,
                }
        }
 
+ end:
        if (fixed_rate)
                *fixed_rate = snd_usb_pcm_has_fixed_rate(subs);
        return sync_fmt;
index 59faa5a9a714154c0a387799a2a09a4a1baac091..b67617b68e509d2c86d78058f7796a64aab00f41 100644 (file)
@@ -304,7 +304,8 @@ static void line6_data_received(struct urb *urb)
                for (;;) {
                        done =
                                line6_midibuf_read(mb, line6->buffer_message,
-                                               LINE6_MIDI_MESSAGE_MAXLEN);
+                                                  LINE6_MIDI_MESSAGE_MAXLEN,
+                                                  LINE6_MIDIBUF_READ_RX);
 
                        if (done <= 0)
                                break;
index ba0e2b7e8fe194b19b7c83372edc4bdd5934c88a..0838632c788e4a120c50a20db56ff75d69e1ed77 100644 (file)
@@ -44,7 +44,8 @@ static void line6_midi_transmit(struct snd_rawmidi_substream *substream)
        int req, done;
 
        for (;;) {
-               req = min(line6_midibuf_bytes_free(mb), line6->max_packet_size);
+               req = min3(line6_midibuf_bytes_free(mb), line6->max_packet_size,
+                          LINE6_FALLBACK_MAXPACKETSIZE);
                done = snd_rawmidi_transmit_peek(substream, chunk, req);
 
                if (done == 0)
@@ -56,7 +57,8 @@ static void line6_midi_transmit(struct snd_rawmidi_substream *substream)
 
        for (;;) {
                done = line6_midibuf_read(mb, chunk,
-                                         LINE6_FALLBACK_MAXPACKETSIZE);
+                                         LINE6_FALLBACK_MAXPACKETSIZE,
+                                         LINE6_MIDIBUF_READ_TX);
 
                if (done == 0)
                        break;
index 6a70463f82c4efcbd533c192844e072cae497cc2..e7f830f7526c96ad809e5e3f4cf43ba5da94b677 100644 (file)
@@ -9,6 +9,7 @@
 
 #include "midibuf.h"
 
+
 static int midibuf_message_length(unsigned char code)
 {
        int message_length;
@@ -20,12 +21,7 @@ static int midibuf_message_length(unsigned char code)
 
                message_length = length[(code >> 4) - 8];
        } else {
-               /*
-                  Note that according to the MIDI specification 0xf2 is
-                  the "Song Position Pointer", but this is used by Line 6
-                  to send sysex messages to the host.
-                */
-               static const int length[] = { -1, 2, -1, 2, -1, -1, 1, 1, 1, 1,
+               static const int length[] = { -1, 2, 2, 2, -1, -1, 1, 1, 1, -1,
                        1, 1, 1, -1, 1, 1
                };
                message_length = length[code & 0x0f];
@@ -125,7 +121,7 @@ int line6_midibuf_write(struct midi_buffer *this, unsigned char *data,
 }
 
 int line6_midibuf_read(struct midi_buffer *this, unsigned char *data,
-                      int length)
+                      int length, int read_type)
 {
        int bytes_used;
        int length1, length2;
@@ -148,9 +144,22 @@ int line6_midibuf_read(struct midi_buffer *this, unsigned char *data,
 
        length1 = this->size - this->pos_read;
 
-       /* check MIDI command length */
        command = this->buf[this->pos_read];
+       /*
+          PODxt always has status byte lower nibble set to 0010,
+          when it means to send 0000, so we correct it here so
+          that control/program changes come on channel 1 and
+          sysex message status byte is correct
+        */
+       if (read_type == LINE6_MIDIBUF_READ_RX) {
+               if (command == 0xb2 || command == 0xc2 || command == 0xf2) {
+                       unsigned char fixed = command & 0xf0;
+                       this->buf[this->pos_read] = fixed;
+                       command = fixed;
+               }
+       }
 
+       /* check MIDI command length */
        if (command & 0x80) {
                midi_length = midibuf_message_length(command);
                this->command_prev = command;
index 124a8f9f7e96c9832e17041fda8a27bee9963534..542e8d836f87df27637c61cb7400b91593e05b3f 100644 (file)
@@ -8,6 +8,9 @@
 #ifndef MIDIBUF_H
 #define MIDIBUF_H
 
+#define LINE6_MIDIBUF_READ_TX 0
+#define LINE6_MIDIBUF_READ_RX 1
+
 struct midi_buffer {
        unsigned char *buf;
        int size;
@@ -23,7 +26,7 @@ extern void line6_midibuf_destroy(struct midi_buffer *mb);
 extern int line6_midibuf_ignore(struct midi_buffer *mb, int length);
 extern int line6_midibuf_init(struct midi_buffer *mb, int size, int split);
 extern int line6_midibuf_read(struct midi_buffer *mb, unsigned char *data,
-                             int length);
+                             int length, int read_type);
 extern void line6_midibuf_reset(struct midi_buffer *mb);
 extern int line6_midibuf_write(struct midi_buffer *mb, unsigned char *data,
                               int length);
index cd41aa7f038513c4d90f788b50d78bc34320e91e..d173971e5f0297ed740833bcf216041a4fb94ff9 100644 (file)
@@ -159,8 +159,9 @@ static struct line6_pcm_properties pod_pcm_properties = {
        .bytes_per_channel = 3 /* SNDRV_PCM_FMTBIT_S24_3LE */
 };
 
+
 static const char pod_version_header[] = {
-       0xf2, 0x7e, 0x7f, 0x06, 0x02
+       0xf0, 0x7e, 0x7f, 0x06, 0x02
 };
 
 static char *pod_alloc_sysex_buffer(struct usb_line6_pod *pod, int code,
index 99a66d0ef5b26f8c4792717bc565300d22ddd260..d959da7a1afbab5bca4c92c3613025e48a84432d 100644 (file)
@@ -160,9 +160,12 @@ find_substream_format(struct snd_usb_substream *subs,
 bool snd_usb_pcm_has_fixed_rate(struct snd_usb_substream *subs)
 {
        const struct audioformat *fp;
-       struct snd_usb_audio *chip = subs->stream->chip;
+       struct snd_usb_audio *chip;
        int rate = -1;
 
+       if (!subs)
+               return false;
+       chip = subs->stream->chip;
        if (!(chip->quirk_flags & QUIRK_FLAG_FIXED_RATE))
                return false;
        list_for_each_entry(fp, &subs->fmt_list, list) {
@@ -525,6 +528,8 @@ static int snd_usb_hw_params(struct snd_pcm_substream *substream,
                if (snd_usb_endpoint_compatible(chip, subs->data_endpoint,
                                                fmt, hw_params))
                        goto unlock;
+               if (stop_endpoints(subs, false))
+                       sync_pending_stops(subs);
                close_endpoints(chip, subs);
        }
 
@@ -787,11 +792,27 @@ static int apply_hw_params_minmax(struct snd_interval *it, unsigned int rmin,
        return changed;
 }
 
+/* get the specified endpoint object that is being used by other streams
+ * (i.e. the parameter is locked)
+ */
+static const struct snd_usb_endpoint *
+get_endpoint_in_use(struct snd_usb_audio *chip, int endpoint,
+                   const struct snd_usb_endpoint *ref_ep)
+{
+       const struct snd_usb_endpoint *ep;
+
+       ep = snd_usb_get_endpoint(chip, endpoint);
+       if (ep && ep->cur_audiofmt && (ep != ref_ep || ep->opened > 1))
+               return ep;
+       return NULL;
+}
+
 static int hw_rule_rate(struct snd_pcm_hw_params *params,
                        struct snd_pcm_hw_rule *rule)
 {
        struct snd_usb_substream *subs = rule->private;
        struct snd_usb_audio *chip = subs->stream->chip;
+       const struct snd_usb_endpoint *ep;
        const struct audioformat *fp;
        struct snd_interval *it = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
        unsigned int rmin, rmax, r;
@@ -803,6 +824,29 @@ static int hw_rule_rate(struct snd_pcm_hw_params *params,
        list_for_each_entry(fp, &subs->fmt_list, list) {
                if (!hw_check_valid_format(subs, params, fp))
                        continue;
+
+               ep = get_endpoint_in_use(chip, fp->endpoint,
+                                        subs->data_endpoint);
+               if (ep) {
+                       hwc_debug("rate limit %d for ep#%x\n",
+                                 ep->cur_rate, fp->endpoint);
+                       rmin = min(rmin, ep->cur_rate);
+                       rmax = max(rmax, ep->cur_rate);
+                       continue;
+               }
+
+               if (fp->implicit_fb) {
+                       ep = get_endpoint_in_use(chip, fp->sync_ep,
+                                                subs->sync_endpoint);
+                       if (ep) {
+                               hwc_debug("rate limit %d for sync_ep#%x\n",
+                                         ep->cur_rate, fp->sync_ep);
+                               rmin = min(rmin, ep->cur_rate);
+                               rmax = max(rmax, ep->cur_rate);
+                               continue;
+                       }
+               }
+
                r = snd_usb_endpoint_get_clock_rate(chip, fp->clock);
                if (r > 0) {
                        if (!snd_interval_test(it, r))
@@ -872,6 +916,8 @@ static int hw_rule_format(struct snd_pcm_hw_params *params,
                          struct snd_pcm_hw_rule *rule)
 {
        struct snd_usb_substream *subs = rule->private;
+       struct snd_usb_audio *chip = subs->stream->chip;
+       const struct snd_usb_endpoint *ep;
        const struct audioformat *fp;
        struct snd_mask *fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
        u64 fbits;
@@ -881,6 +927,27 @@ static int hw_rule_format(struct snd_pcm_hw_params *params,
        list_for_each_entry(fp, &subs->fmt_list, list) {
                if (!hw_check_valid_format(subs, params, fp))
                        continue;
+
+               ep = get_endpoint_in_use(chip, fp->endpoint,
+                                        subs->data_endpoint);
+               if (ep) {
+                       hwc_debug("format limit %d for ep#%x\n",
+                                 ep->cur_format, fp->endpoint);
+                       fbits |= pcm_format_to_bits(ep->cur_format);
+                       continue;
+               }
+
+               if (fp->implicit_fb) {
+                       ep = get_endpoint_in_use(chip, fp->sync_ep,
+                                                subs->sync_endpoint);
+                       if (ep) {
+                               hwc_debug("format limit %d for sync_ep#%x\n",
+                                         ep->cur_format, fp->sync_ep);
+                               fbits |= pcm_format_to_bits(ep->cur_format);
+                               continue;
+                       }
+               }
+
                fbits |= fp->formats;
        }
        return apply_hw_params_format_bits(fmt, fbits);
@@ -913,98 +980,95 @@ static int hw_rule_period_time(struct snd_pcm_hw_params *params,
        return apply_hw_params_minmax(it, pmin, UINT_MAX);
 }
 
-/* get the EP or the sync EP for implicit fb when it's already set up */
-static const struct snd_usb_endpoint *
-get_sync_ep_from_substream(struct snd_usb_substream *subs)
-{
-       struct snd_usb_audio *chip = subs->stream->chip;
-       const struct audioformat *fp;
-       const struct snd_usb_endpoint *ep;
-
-       list_for_each_entry(fp, &subs->fmt_list, list) {
-               ep = snd_usb_get_endpoint(chip, fp->endpoint);
-               if (ep && ep->cur_audiofmt) {
-                       /* if EP is already opened solely for this substream,
-                        * we still allow us to change the parameter; otherwise
-                        * this substream has to follow the existing parameter
-                        */
-                       if (ep->cur_audiofmt != subs->cur_audiofmt || ep->opened > 1)
-                               return ep;
-               }
-               if (!fp->implicit_fb)
-                       continue;
-               /* for the implicit fb, check the sync ep as well */
-               ep = snd_usb_get_endpoint(chip, fp->sync_ep);
-               if (ep && ep->cur_audiofmt)
-                       return ep;
-       }
-       return NULL;
-}
-
 /* additional hw constraints for implicit feedback mode */
-static int hw_rule_format_implicit_fb(struct snd_pcm_hw_params *params,
-                                     struct snd_pcm_hw_rule *rule)
-{
-       struct snd_usb_substream *subs = rule->private;
-       const struct snd_usb_endpoint *ep;
-       struct snd_mask *fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
-
-       ep = get_sync_ep_from_substream(subs);
-       if (!ep)
-               return 0;
-
-       hwc_debug("applying %s\n", __func__);
-       return apply_hw_params_format_bits(fmt, pcm_format_to_bits(ep->cur_format));
-}
-
-static int hw_rule_rate_implicit_fb(struct snd_pcm_hw_params *params,
-                                   struct snd_pcm_hw_rule *rule)
-{
-       struct snd_usb_substream *subs = rule->private;
-       const struct snd_usb_endpoint *ep;
-       struct snd_interval *it;
-
-       ep = get_sync_ep_from_substream(subs);
-       if (!ep)
-               return 0;
-
-       hwc_debug("applying %s\n", __func__);
-       it = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE);
-       return apply_hw_params_minmax(it, ep->cur_rate, ep->cur_rate);
-}
-
 static int hw_rule_period_size_implicit_fb(struct snd_pcm_hw_params *params,
                                           struct snd_pcm_hw_rule *rule)
 {
        struct snd_usb_substream *subs = rule->private;
+       struct snd_usb_audio *chip = subs->stream->chip;
+       const struct audioformat *fp;
        const struct snd_usb_endpoint *ep;
        struct snd_interval *it;
+       unsigned int rmin, rmax;
 
-       ep = get_sync_ep_from_substream(subs);
-       if (!ep)
-               return 0;
-
-       hwc_debug("applying %s\n", __func__);
        it = hw_param_interval(params, SNDRV_PCM_HW_PARAM_PERIOD_SIZE);
-       return apply_hw_params_minmax(it, ep->cur_period_frames,
-                                     ep->cur_period_frames);
+       hwc_debug("hw_rule_period_size: (%u,%u)\n", it->min, it->max);
+       rmin = UINT_MAX;
+       rmax = 0;
+       list_for_each_entry(fp, &subs->fmt_list, list) {
+               if (!hw_check_valid_format(subs, params, fp))
+                       continue;
+               ep = get_endpoint_in_use(chip, fp->endpoint,
+                                        subs->data_endpoint);
+               if (ep) {
+                       hwc_debug("period size limit %d for ep#%x\n",
+                                 ep->cur_period_frames, fp->endpoint);
+                       rmin = min(rmin, ep->cur_period_frames);
+                       rmax = max(rmax, ep->cur_period_frames);
+                       continue;
+               }
+
+               if (fp->implicit_fb) {
+                       ep = get_endpoint_in_use(chip, fp->sync_ep,
+                                                subs->sync_endpoint);
+                       if (ep) {
+                               hwc_debug("period size limit %d for sync_ep#%x\n",
+                                         ep->cur_period_frames, fp->sync_ep);
+                               rmin = min(rmin, ep->cur_period_frames);
+                               rmax = max(rmax, ep->cur_period_frames);
+                               continue;
+                       }
+               }
+       }
+
+       if (!rmax)
+               return 0; /* no limit by implicit fb */
+       return apply_hw_params_minmax(it, rmin, rmax);
 }
 
 static int hw_rule_periods_implicit_fb(struct snd_pcm_hw_params *params,
                                       struct snd_pcm_hw_rule *rule)
 {
        struct snd_usb_substream *subs = rule->private;
+       struct snd_usb_audio *chip = subs->stream->chip;
+       const struct audioformat *fp;
        const struct snd_usb_endpoint *ep;
        struct snd_interval *it;
+       unsigned int rmin, rmax;
 
-       ep = get_sync_ep_from_substream(subs);
-       if (!ep)
-               return 0;
-
-       hwc_debug("applying %s\n", __func__);
        it = hw_param_interval(params, SNDRV_PCM_HW_PARAM_PERIODS);
-       return apply_hw_params_minmax(it, ep->cur_buffer_periods,
-                                     ep->cur_buffer_periods);
+       hwc_debug("hw_rule_periods: (%u,%u)\n", it->min, it->max);
+       rmin = UINT_MAX;
+       rmax = 0;
+       list_for_each_entry(fp, &subs->fmt_list, list) {
+               if (!hw_check_valid_format(subs, params, fp))
+                       continue;
+               ep = get_endpoint_in_use(chip, fp->endpoint,
+                                        subs->data_endpoint);
+               if (ep) {
+                       hwc_debug("periods limit %d for ep#%x\n",
+                                 ep->cur_buffer_periods, fp->endpoint);
+                       rmin = min(rmin, ep->cur_buffer_periods);
+                       rmax = max(rmax, ep->cur_buffer_periods);
+                       continue;
+               }
+
+               if (fp->implicit_fb) {
+                       ep = get_endpoint_in_use(chip, fp->sync_ep,
+                                                subs->sync_endpoint);
+                       if (ep) {
+                               hwc_debug("periods limit %d for sync_ep#%x\n",
+                                         ep->cur_buffer_periods, fp->sync_ep);
+                               rmin = min(rmin, ep->cur_buffer_periods);
+                               rmax = max(rmax, ep->cur_buffer_periods);
+                               continue;
+                       }
+               }
+       }
+
+       if (!rmax)
+               return 0; /* no limit by implicit fb */
+       return apply_hw_params_minmax(it, rmin, rmax);
 }
 
 /*
@@ -1113,16 +1177,6 @@ static int setup_hw_info(struct snd_pcm_runtime *runtime, struct snd_usb_substre
                return err;
 
        /* additional hw constraints for implicit fb */
-       err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_FORMAT,
-                                 hw_rule_format_implicit_fb, subs,
-                                 SNDRV_PCM_HW_PARAM_FORMAT, -1);
-       if (err < 0)
-               return err;
-       err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_RATE,
-                                 hw_rule_rate_implicit_fb, subs,
-                                 SNDRV_PCM_HW_PARAM_RATE, -1);
-       if (err < 0)
-               return err;
        err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
                                  hw_rule_period_size_implicit_fb, subs,
                                  SNDRV_PCM_HW_PARAM_PERIOD_SIZE, -1);
index f75601ca2d525688b63396066e91543db4132911..f10f4e6d3fb851661c3553f43084f1d678ca1752 100644 (file)
@@ -1222,6 +1222,12 @@ static int __snd_usb_parse_audio_interface(struct snd_usb_audio *chip,
                        if (err < 0)
                                return err;
                }
+
+               /* try to set the interface... */
+               usb_set_interface(chip->dev, iface_no, 0);
+               snd_usb_init_pitch(chip, fp);
+               snd_usb_init_sample_rate(chip, fp, fp->rate_max);
+               usb_set_interface(chip->dev, iface_no, altno);
        }
        return 0;
 }
index 4041748c12e515c090b62f9918a0877ccd0d574c..b66e037710d0dfcbb608b45d8d02a9905977eb3a 100644 (file)
@@ -311,7 +311,7 @@ static int xen_drv_probe(struct xenbus_device *xb_dev,
        return xenbus_switch_state(xb_dev, XenbusStateInitialising);
 }
 
-static int xen_drv_remove(struct xenbus_device *dev)
+static void xen_drv_remove(struct xenbus_device *dev)
 {
        struct xen_snd_front_info *front_info = dev_get_drvdata(&dev->dev);
        int to = 100;
@@ -345,7 +345,6 @@ static int xen_drv_remove(struct xenbus_device *dev)
 
        xen_snd_drv_fini(front_info);
        xenbus_frontend_closed(dev);
-       return 0;
 }
 
 static const struct xenbus_device_id xen_drv_ids[] = {
index abc418650fec04fda7f79c2c46f3ec255124781d..683ca3af408485aa6cdf4aa44b5988d91cd6a0f3 100644 (file)
@@ -41,7 +41,7 @@
        (((midr) & MIDR_IMPLEMENTOR_MASK) >> MIDR_IMPLEMENTOR_SHIFT)
 
 #define MIDR_CPU_MODEL(imp, partnum) \
-       (((imp)                 << MIDR_IMPLEMENTOR_SHIFT) | \
+       ((_AT(u32, imp)         << MIDR_IMPLEMENTOR_SHIFT) | \
        (0xf                    << MIDR_ARCHITECTURE_SHIFT) | \
        ((partnum)              << MIDR_PARTNUM_SHIFT))
 
@@ -80,6 +80,7 @@
 #define ARM_CPU_PART_CORTEX_X1         0xD44
 #define ARM_CPU_PART_CORTEX_A510       0xD46
 #define ARM_CPU_PART_CORTEX_A710       0xD47
+#define ARM_CPU_PART_CORTEX_A715       0xD4D
 #define ARM_CPU_PART_CORTEX_X2         0xD48
 #define ARM_CPU_PART_NEOVERSE_N2       0xD49
 #define ARM_CPU_PART_CORTEX_A78C       0xD4B
 #define APPLE_CPU_PART_M1_FIRESTORM_PRO        0x025
 #define APPLE_CPU_PART_M1_ICESTORM_MAX 0x028
 #define APPLE_CPU_PART_M1_FIRESTORM_MAX        0x029
+#define APPLE_CPU_PART_M2_BLIZZARD     0x032
+#define APPLE_CPU_PART_M2_AVALANCHE    0x033
 
 #define AMPERE_CPU_PART_AMPERE1                0xAC3
 
 #define MIDR_CORTEX_X1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X1)
 #define MIDR_CORTEX_A510 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A510)
 #define MIDR_CORTEX_A710 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A710)
+#define MIDR_CORTEX_A715 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A715)
 #define MIDR_CORTEX_X2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X2)
 #define MIDR_NEOVERSE_N2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N2)
 #define MIDR_CORTEX_A78C       MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78C)
 #define MIDR_APPLE_M1_FIRESTORM_PRO MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_FIRESTORM_PRO)
 #define MIDR_APPLE_M1_ICESTORM_MAX MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_ICESTORM_MAX)
 #define MIDR_APPLE_M1_FIRESTORM_MAX MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M1_FIRESTORM_MAX)
+#define MIDR_APPLE_M2_BLIZZARD MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M2_BLIZZARD)
+#define MIDR_APPLE_M2_AVALANCHE MIDR_CPU_MODEL(ARM_CPU_IMP_APPLE, APPLE_CPU_PART_M2_AVALANCHE)
 #define MIDR_AMPERE1 MIDR_CPU_MODEL(ARM_CPU_IMP_AMPERE, AMPERE_CPU_PART_AMPERE1)
 
 /* Fujitsu Erratum 010001 affects A64FX 1.0 and 1.1, (v0r0 and v1r0) */
index 316917b9870704de245f002cb4261c46f8a2fea4..a7a857f1784d80d6264eeebecbeb371c7d13ba5b 100644 (file)
@@ -43,6 +43,7 @@
 #define __KVM_HAVE_VCPU_EVENTS
 
 #define KVM_COALESCED_MMIO_PAGE_OFFSET 1
+#define KVM_DIRTY_LOG_PAGE_OFFSET 64
 
 #define KVM_REG_SIZE(id)                                               \
        (1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))
index 649e50a8f9ddff8780e71fb7d3862b094872a322..e48deab8901d4ecb9fddaaf334247e27fb6c1762 100644 (file)
@@ -206,6 +206,8 @@ struct kvm_msr_list {
 struct kvm_msr_filter_range {
 #define KVM_MSR_FILTER_READ  (1 << 0)
 #define KVM_MSR_FILTER_WRITE (1 << 1)
+#define KVM_MSR_FILTER_RANGE_VALID_MASK (KVM_MSR_FILTER_READ | \
+                                        KVM_MSR_FILTER_WRITE)
        __u32 flags;
        __u32 nmsrs; /* number of msrs in bitmap */
        __u32 base;  /* MSR index the bitmap starts at */
@@ -214,8 +216,11 @@ struct kvm_msr_filter_range {
 
 #define KVM_MSR_FILTER_MAX_RANGES 16
 struct kvm_msr_filter {
+#ifndef __KERNEL__
 #define KVM_MSR_FILTER_DEFAULT_ALLOW (0 << 0)
+#endif
 #define KVM_MSR_FILTER_DEFAULT_DENY  (1 << 0)
+#define KVM_MSR_FILTER_VALID_MASK (KVM_MSR_FILTER_DEFAULT_DENY)
        __u32 flags;
        struct kvm_msr_filter_range ranges[KVM_MSR_FILTER_MAX_RANGES];
 };
index 787b857d3fb5fb4b8da5d86afce6e364d7721034..f610e184ce02abf6163c2863eb7bc63a7ec1eb9a 100644 (file)
@@ -145,6 +145,10 @@ ifeq ($(feature-llvm),1)
   LLVM_CONFIG_LIB_COMPONENTS := mcdisassembler all-targets
   CFLAGS  += $(shell $(LLVM_CONFIG) --cflags --libs $(LLVM_CONFIG_LIB_COMPONENTS))
   LIBS    += $(shell $(LLVM_CONFIG) --libs $(LLVM_CONFIG_LIB_COMPONENTS))
+  ifeq ($(shell $(LLVM_CONFIG) --shared-mode),static)
+    LIBS += $(shell $(LLVM_CONFIG) --system-libs $(LLVM_CONFIG_LIB_COMPONENTS))
+    LIBS += -lstdc++
+  endif
   LDFLAGS += $(shell $(LLVM_CONFIG) --ldflags)
 else
   # Fall back on libbfd
index cc7070c7439ba6a5327d992a6cebf4fdc591f7ab..b4898ff085ded5a9a8720cf6b2e9e400835a93c4 100644 (file)
 #define __static_assert(expr, msg, ...) _Static_assert(expr, msg)
 #endif // static_assert
 
+
+/*
+ * Compile time check that field has an expected offset
+ */
+#define ASSERT_STRUCT_OFFSET(type, field, expected_offset)     \
+       BUILD_BUG_ON_MSG(offsetof(type, field) != (expected_offset),    \
+               "Offset of " #field " in " #type " has changed.")
+
+
 #endif /* _LINUX_BUILD_BUG_H */
index 5fc5b8029bff9ebba2e1ed331ff48cce1f679e23..7380093ba9e7dfb54b3f3dc88230607b269705e2 100644 (file)
@@ -192,6 +192,7 @@ struct sys_stat_struct {
 __asm__ (".section .text\n"
     ".weak __start\n"
     ".set nomips16\n"
+    ".set push\n"
     ".set    noreorder\n"
     ".option pic0\n"
     ".ent __start\n"
@@ -210,6 +211,7 @@ __asm__ (".section .text\n"
     "li $v0, 4001\n"              // NR_exit == 4001
     "syscall\n"
     ".end __start\n"
+    ".set pop\n"
     "");
 
 #endif // _NOLIBC_ARCH_MIPS_H
index ba04771cb3a3418fca083b85b4450ed5cf4b6046..a3bdd9803f8cb32ede92cc193ba214ba2349d294 100644 (file)
 #define O_RDONLY            0
 #define O_WRONLY            1
 #define O_RDWR              2
-#define O_CREAT         0x100
-#define O_EXCL          0x200
-#define O_NOCTTY        0x400
-#define O_TRUNC        0x1000
-#define O_APPEND       0x2000
-#define O_NONBLOCK     0x4000
-#define O_DIRECTORY  0x200000
+#define O_CREAT          0x40
+#define O_EXCL           0x80
+#define O_NOCTTY        0x100
+#define O_TRUNC         0x200
+#define O_APPEND        0x400
+#define O_NONBLOCK      0x800
+#define O_DIRECTORY   0x10000
 
 struct sys_stat_struct {
        unsigned long   st_dev;         /* Device.  */
index e3000b2992d7b92f6e1c2927bf80b6ca0095cc76..6f90706d06442d87e262cf36c00ce7fe61972dbf 100644 (file)
@@ -96,4 +96,7 @@ int ispunct(int c)
        return isgraph(c) && !isalnum(c);
 }
 
+/* make sure to include all global symbols */
+#include "nolibc.h"
+
 #endif /* _NOLIBC_CTYPE_H */
index 06893d6dfb7a6b7a05b79f5d67f4493735055b8d..9dc4919c769b7db38ee9023ad04e2826f33867ea 100644 (file)
@@ -24,4 +24,7 @@ static int errno;
  */
 #define MAX_ERRNO 4095
 
+/* make sure to include all global symbols */
+#include "nolibc.h"
+
 #endif /* _NOLIBC_ERRNO_H */
index ef47e71e2be37665aa1dc65f26f38c5a7de380ca..137552216e46960dc8baa1e25759daa1f152c3c9 100644 (file)
@@ -19,4 +19,7 @@ int raise(int signal)
        return sys_kill(sys_getpid(), signal);
 }
 
+/* make sure to include all global symbols */
+#include "nolibc.h"
+
 #endif /* _NOLIBC_SIGNAL_H */
index a3cebc4bc3ac4d997431db4fd2ce72fb5fd64387..96ac8afc5aeedc047eafaa46fb1a5b024865ca9f 100644 (file)
@@ -303,4 +303,7 @@ void perror(const char *msg)
        fprintf(stderr, "%s%serrno=%d\n", (msg && *msg) ? msg : "", (msg && *msg) ? ": " : "", errno);
 }
 
+/* make sure to include all global symbols */
+#include "nolibc.h"
+
 #endif /* _NOLIBC_STDIO_H */
index 92378c4b9660516255377d2819f72d5eff77afda..a24000d1e8222c2b23511529af32de14a66206e3 100644 (file)
@@ -419,4 +419,7 @@ char *u64toa(uint64_t in)
        return itoa_buffer;
 }
 
+/* make sure to include all global symbols */
+#include "nolibc.h"
+
 #endif /* _NOLIBC_STDLIB_H */
index ad97c0d522b8e40af7ba428e669c553d66e61d10..fffdaf6ff46731d5aac33b7b7a244471f24a6724 100644 (file)
@@ -88,8 +88,11 @@ void *memset(void *dst, int b, size_t len)
 {
        char *p = dst;
 
-       while (len--)
+       while (len--) {
+               /* prevent gcc from recognizing memset() here */
+               asm volatile("");
                *(p++) = b;
+       }
        return dst;
 }
 
@@ -285,4 +288,7 @@ char *strrchr(const char *s, int c)
        return (char *)ret;
 }
 
+/* make sure to include all global symbols */
+#include "nolibc.h"
+
 #endif /* _NOLIBC_STRING_H */
index ce3ee03aa6794b484e98c93ba4f4addca62e06d0..78473d34e27cd66fcae7d900e1d21f9074f52de6 100644 (file)
@@ -1243,5 +1243,7 @@ ssize_t write(int fd, const void *buf, size_t count)
        return ret;
 }
 
+/* make sure to include all global symbols */
+#include "nolibc.h"
 
 #endif /* _NOLIBC_SYS_H */
index d18b7661fdd713ab4fa59e59ffc7923265048b8d..84655361b9ad2ce141f9af2759f2b128ae8a83a3 100644 (file)
@@ -25,4 +25,7 @@ time_t time(time_t *tptr)
        return tv.tv_sec;
 }
 
+/* make sure to include all global symbols */
+#include "nolibc.h"
+
 #endif /* _NOLIBC_TIME_H */
index 959997034e553a1cefa2e920209c5132e2e75048..fbbc0e68c001b053a8d75f3d3bcfa3c7aaa2ab60 100644 (file)
 #define S_IFSOCK       0140000
 #define S_IFMT         0170000
 
-#define S_ISDIR(mode)  (((mode) & S_IFDIR)  == S_IFDIR)
-#define S_ISCHR(mode)  (((mode) & S_IFCHR)  == S_IFCHR)
-#define S_ISBLK(mode)  (((mode) & S_IFBLK)  == S_IFBLK)
-#define S_ISREG(mode)  (((mode) & S_IFREG)  == S_IFREG)
-#define S_ISFIFO(mode) (((mode) & S_IFIFO)  == S_IFIFO)
-#define S_ISLNK(mode)  (((mode) & S_IFLNK)  == S_IFLNK)
-#define S_ISSOCK(mode) (((mode) & S_IFSOCK) == S_IFSOCK)
+#define S_ISDIR(mode)  (((mode) & S_IFMT) == S_IFDIR)
+#define S_ISCHR(mode)  (((mode) & S_IFMT) == S_IFCHR)
+#define S_ISBLK(mode)  (((mode) & S_IFMT) == S_IFBLK)
+#define S_ISREG(mode)  (((mode) & S_IFMT) == S_IFREG)
+#define S_ISFIFO(mode) (((mode) & S_IFMT) == S_IFIFO)
+#define S_ISLNK(mode)  (((mode) & S_IFMT) == S_IFLNK)
+#define S_ISSOCK(mode) (((mode) & S_IFMT) == S_IFSOCK)
 
 /* dirent types */
 #define DT_UNKNOWN     0x0
 #define EXIT_SUCCESS 0
 #define EXIT_FAILURE 1
 
+#define FD_SETIDXMASK (8 * sizeof(unsigned long))
+#define FD_SETBITMASK (8 * sizeof(unsigned long)-1)
+
 /* for select() */
 typedef struct {
-       uint32_t fd32[(FD_SETSIZE + 31) / 32];
+       unsigned long fds[(FD_SETSIZE + FD_SETBITMASK) / FD_SETIDXMASK];
 } fd_set;
 
-#define FD_CLR(fd, set) do {                                            \
-               fd_set *__set = (set);                                  \
-               int __fd = (fd);                                        \
-               if (__fd >= 0)                                          \
-                       __set->fd32[__fd / 32] &= ~(1U << (__fd & 31)); \
+#define FD_CLR(fd, set) do {                                           \
+               fd_set *__set = (set);                                  \
+               int __fd = (fd);                                        \
+               if (__fd >= 0)                                          \
+                       __set->fds[__fd / FD_SETIDXMASK] &=             \
+                               ~(1UL << (__fd & FD_SETBITMASK));       \
        } while (0)
 
-#define FD_SET(fd, set) do {                                            \
-               fd_set *__set = (set);                                  \
-               int __fd = (fd);                                        \
-               if (__fd >= 0)                                          \
-                       __set->fd32[__fd / 32] |= 1U << (__fd & 31);    \
+#define FD_SET(fd, set) do {                                           \
+               fd_set *__set = (set);                                  \
+               int __fd = (fd);                                        \
+               if (__fd >= 0)                                          \
+                       __set->fds[__fd / FD_SETIDXMASK] |=             \
+                               1UL << (__fd & FD_SETBITMASK);          \
        } while (0)
 
-#define FD_ISSET(fd, set) ({                                                  \
-               fd_set *__set = (set);                                        \
-               int __fd = (fd);                                              \
-               int __r = 0;                                                  \
-               if (__fd >= 0)                                                \
-                       __r = !!(__set->fd32[__fd / 32] & 1U << (__fd & 31)); \
-               __r;                                                          \
+#define FD_ISSET(fd, set) ({                                           \
+               fd_set *__set = (set);                                  \
+               int __fd = (fd);                                        \
+               int __r = 0;                                            \
+               if (__fd >= 0)                                          \
+                       __r = !!(__set->fds[__fd / FD_SETIDXMASK] &     \
+                                1UL << (__fd & FD_SETBITMASK));        \
+               __r;                                                    \
        })
 
-#define FD_ZERO(set) do {                                               \
-               fd_set *__set = (set);                                  \
-               int __idx;                                              \
-               for (__idx = 0; __idx < (FD_SETSIZE+31) / 32; __idx ++) \
-                       __set->fd32[__idx] = 0;                         \
+#define FD_ZERO(set) do {                                              \
+               fd_set *__set = (set);                                  \
+               int __idx;                                              \
+               int __size = (FD_SETSIZE+FD_SETBITMASK) / FD_SETIDXMASK;\
+               for (__idx = 0; __idx < __size; __idx++)                \
+                       __set->fds[__idx] = 0;                          \
        } while (0)
 
 /* for poll() */
@@ -202,4 +209,7 @@ struct stat {
 })
 #endif
 
+/* make sure to include all global symbols */
+#include "nolibc.h"
+
 #endif /* _NOLIBC_TYPES_H */
index 1c25e20ee360667540ab80fad43ed2d31935d789..1cfcd52106a420afc327ef8403afdd73da0efb10 100644 (file)
@@ -51,4 +51,7 @@ int tcsetpgrp(int fd, pid_t pid)
        return ioctl(fd, TIOCSPGRP, &pid);
 }
 
+/* make sure to include all global symbols */
+#include "nolibc.h"
+
 #endif /* _NOLIBC_UNISTD_H */
index 20522d4ba1e0d8b058876d93fd9dd554910f6263..55155e262646e5fc48b6317054b07b1c15aad02a 100644 (file)
@@ -1767,6 +1767,7 @@ struct kvm_xen_hvm_attr {
                __u8 runstate_update_flag;
                struct {
                        __u64 gfn;
+#define KVM_XEN_INVALID_GFN ((__u64)-1)
                } shared_info;
                struct {
                        __u32 send_port;
@@ -1798,6 +1799,7 @@ struct kvm_xen_hvm_attr {
        } u;
 };
 
+
 /* Available with KVM_CAP_XEN_HVM / KVM_XEN_HVM_CONFIG_SHARED_INFO */
 #define KVM_XEN_ATTR_TYPE_LONG_MODE            0x0
 #define KVM_XEN_ATTR_TYPE_SHARED_INFO          0x1
@@ -1823,6 +1825,7 @@ struct kvm_xen_vcpu_attr {
        __u16 pad[3];
        union {
                __u64 gpa;
+#define KVM_XEN_INVALID_GPA ((__u64)-1)
                __u64 pad[8];
                struct {
                        __u64 state;
index 4350be739f4fac504657fe76d4b8c93da1d244b7..4b7c8b33069e51e5839de0144882b4edfbef6872 100644 (file)
@@ -427,6 +427,15 @@ static int decode_instructions(struct objtool_file *file)
                        if (func->type != STT_NOTYPE && func->type != STT_FUNC)
                                continue;
 
+                       if (func->offset == sec->sh.sh_size) {
+                               /* Heuristic: likely an "end" symbol */
+                               if (func->type == STT_NOTYPE)
+                                       continue;
+                               WARN("%s(): STT_FUNC at end of section",
+                                    func->name);
+                               return -1;
+                       }
+
                        if (func->return_thunk || func->alias != func)
                                continue;
 
index 6e7b88917ca0a10a9c8d9cc052f214451bfa78ac..ba5d942e4c6aabe22a222dd0c20b06c1fe6f289a 100644 (file)
@@ -267,7 +267,7 @@ $(OUTPUT)%.xml : %.txt
        $(ASCIIDOC) -b docbook -d manpage \
                $(ASCIIDOC_EXTRA) -aperf_version=$(PERF_VERSION) \
                -aperf_date=$(shell git log -1 --pretty="format:%cd" \
-                               --date=short $<) \
+                               --date=short --no-show-signature $<) \
                -o $@+ $< && \
        mv $@+ $@
 
index c2504c39bdcb899235352962e05684570f11bde4..5b87846759036f6f4c3b32b45270e4e871aee29d 100644 (file)
@@ -589,6 +589,8 @@ ifndef NO_LIBELF
           $(call feature_check,libbpf-bpf_program__set_insns)
           ifeq ($(feature-libbpf-bpf_program__set_insns), 1)
             CFLAGS += -DHAVE_LIBBPF_BPF_PROGRAM__SET_INSNS
+          else
+            dummy := $(error Error: libbpf devel library needs to be >= 0.8.0 to build with LIBBPF_DYNAMIC, update or build statically with the version that comes with the kernel sources);
           endif
           $(call feature_check,libbpf-btf__raw_data)
           ifeq ($(feature-libbpf-btf__raw_data), 1)
@@ -602,6 +604,8 @@ ifndef NO_LIBELF
           dummy := $(error Error: No libbpf devel library found, please install libbpf-devel);
         endif
       else
+        # Libbpf will be built as a static library from tools/lib/bpf.
+       LIBBPF_STATIC := 1
        CFLAGS += -DHAVE_LIBBPF_BTF__LOAD_FROM_KERNEL_BY_ID
         CFLAGS += -DHAVE_LIBBPF_BPF_PROG_LOAD
         CFLAGS += -DHAVE_LIBBPF_BPF_OBJECT__NEXT_PROGRAM
@@ -1314,14 +1318,6 @@ tip_instdir_SQ = $(subst ','\'',$(tip_instdir))
 
 export perfexec_instdir_SQ
 
-# If we install to $(HOME) we keep the traceevent default:
-# $(HOME)/.traceevent/plugins
-# Otherwise we install plugins into the global $(libdir).
-ifdef DESTDIR
-plugindir=$(libdir)/traceevent/plugins
-plugindir_SQ= $(subst ','\'',$(plugindir))
-endif
-
 print_var = $(eval $(print_var_code)) $(info $(MSG))
 define print_var_code
     MSG = $(shell printf '...%40s: %s' $(1) $($(1)))
index 13e7d26e77f043c3dd7458dc79d0e3d982ce465d..b7d9c42062300d04b7e615f7a68c5a1b46fc7287 100644 (file)
@@ -303,10 +303,12 @@ ifneq ($(OUTPUT),)
 else
   LIBBPF_OUTPUT = $(CURDIR)/libbpf
 endif
-LIBBPF_DESTDIR = $(LIBBPF_OUTPUT)
-LIBBPF_INCLUDE = $(LIBBPF_DESTDIR)/include
-LIBBPF = $(LIBBPF_OUTPUT)/libbpf.a
-CFLAGS += -I$(LIBBPF_OUTPUT)/include
+ifdef LIBBPF_STATIC
+  LIBBPF_DESTDIR = $(LIBBPF_OUTPUT)
+  LIBBPF_INCLUDE = $(LIBBPF_DESTDIR)/include
+  LIBBPF = $(LIBBPF_OUTPUT)/libbpf.a
+  CFLAGS += -I$(LIBBPF_OUTPUT)/include
+endif
 
 ifneq ($(OUTPUT),)
   LIBSUBCMD_OUTPUT = $(abspath $(OUTPUT))/libsubcmd
@@ -393,10 +395,8 @@ endif
 export PERL_PATH
 
 PERFLIBS = $(LIBAPI) $(LIBPERF) $(LIBSUBCMD) $(LIBSYMBOL)
-ifndef NO_LIBBPF
-  ifndef LIBBPF_DYNAMIC
-    PERFLIBS += $(LIBBPF)
-  endif
+ifdef LIBBPF_STATIC
+  PERFLIBS += $(LIBBPF)
 endif
 
 # We choose to avoid "if .. else if .. else .. endif endif"
@@ -756,12 +756,15 @@ prepare: $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)common-cmds.h archheaders $(drm_ioc
        $(arch_errno_name_array) \
        $(sync_file_range_arrays) \
        $(LIBAPI) \
-       $(LIBBPF) \
        $(LIBPERF) \
        $(LIBSUBCMD) \
        $(LIBSYMBOL) \
        bpf-skel
 
+ifdef LIBBPF_STATIC
+prepare: $(LIBBPF)
+endif
+
 $(OUTPUT)%.o: %.c prepare FORCE
        $(Q)$(MAKE) -f $(srctree)/tools/build/Makefile.build dir=$(build-dir) $@
 
@@ -819,7 +822,7 @@ $(patsubst perf-%,%.o,$(PROGRAMS)): $(wildcard */*.h)
 
 $(LIBAPI): FORCE | $(LIBAPI_OUTPUT)
        $(Q)$(MAKE) -C $(LIBAPI_DIR) O=$(LIBAPI_OUTPUT) \
-               DESTDIR=$(LIBAPI_DESTDIR) prefix= \
+               DESTDIR=$(LIBAPI_DESTDIR) prefix= subdir= \
                $@ install_headers
 
 $(LIBAPI)-clean:
@@ -828,7 +831,7 @@ $(LIBAPI)-clean:
 
 $(LIBBPF): FORCE | $(LIBBPF_OUTPUT)
        $(Q)$(MAKE) -C $(LIBBPF_DIR) FEATURES_DUMP=$(FEATURE_DUMP_EXPORT) \
-               O= OUTPUT=$(LIBBPF_OUTPUT)/ DESTDIR=$(LIBBPF_DESTDIR) prefix= \
+               O= OUTPUT=$(LIBBPF_OUTPUT)/ DESTDIR=$(LIBBPF_DESTDIR) prefix= subdir= \
                $@ install_headers
 
 $(LIBBPF)-clean:
@@ -837,7 +840,7 @@ $(LIBBPF)-clean:
 
 $(LIBPERF): FORCE | $(LIBPERF_OUTPUT)
        $(Q)$(MAKE) -C $(LIBPERF_DIR) O=$(LIBPERF_OUTPUT) \
-               DESTDIR=$(LIBPERF_DESTDIR) prefix= \
+               DESTDIR=$(LIBPERF_DESTDIR) prefix= subdir= \
                $@ install_headers
 
 $(LIBPERF)-clean:
@@ -846,7 +849,7 @@ $(LIBPERF)-clean:
 
 $(LIBSUBCMD): FORCE | $(LIBSUBCMD_OUTPUT)
        $(Q)$(MAKE) -C $(LIBSUBCMD_DIR) O=$(LIBSUBCMD_OUTPUT) \
-               DESTDIR=$(LIBSUBCMD_DESTDIR) prefix= \
+               DESTDIR=$(LIBSUBCMD_DESTDIR) prefix= subdir= \
                $@ install_headers
 
 $(LIBSUBCMD)-clean:
@@ -855,7 +858,7 @@ $(LIBSUBCMD)-clean:
 
 $(LIBSYMBOL): FORCE | $(LIBSYMBOL_OUTPUT)
        $(Q)$(MAKE) -C $(LIBSYMBOL_DIR) O=$(LIBSYMBOL_OUTPUT) \
-               DESTDIR=$(LIBSYMBOL_DESTDIR) prefix= \
+               DESTDIR=$(LIBSYMBOL_DESTDIR) prefix= subdir= \
                $@ install_headers
 
 $(LIBSYMBOL)-clean:
index 19536e17285071873317b5022ec9130fa6a6ef1d..54a198714eb82ba88dd15f549bd7f01cdeb88068 100644 (file)
@@ -4,7 +4,7 @@
 #include <elfutils/libdwfl.h>
 #include "../../util/unwind-libdw.h"
 #include "../../util/perf_regs.h"
-#include "../../util/event.h"
+#include "../../util/sample.h"
 
 bool libdw__arch_set_initial_registers(Dwfl_Thread *thread, void *arg)
 {
index e20656c431a4bb8d952b30f964f0cc4deb3a0211..8ae0a1535293620505d5e41984cbd8a70ff79675 100644 (file)
@@ -26,6 +26,7 @@
 #include "util/string2.h"
 
 #include <linux/kernel.h>
+#include <linux/numa.h>
 #include <linux/rbtree.h>
 #include <linux/string.h>
 #include <linux/zalloc.h>
@@ -185,22 +186,33 @@ static int evsel__process_alloc_event(struct evsel *evsel, struct perf_sample *s
        total_allocated += bytes_alloc;
 
        nr_allocs++;
-       return 0;
-}
 
-static int evsel__process_alloc_node_event(struct evsel *evsel, struct perf_sample *sample)
-{
-       int ret = evsel__process_alloc_event(evsel, sample);
+       /*
+        * Commit 11e9734bcb6a ("mm/slab_common: unify NUMA and UMA
+        * version of tracepoints") adds the field "node" into the
+        * tracepoints 'kmalloc' and 'kmem_cache_alloc'.
+        *
+        * The legacy tracepoints 'kmalloc_node' and 'kmem_cache_alloc_node'
+        * also contain the field "node".
+        *
+        * If the tracepoint contains the field "node" the tool stats the
+        * cross allocation.
+        */
+       if (evsel__field(evsel, "node")) {
+               int node1, node2;
 
-       if (!ret) {
-               int node1 = cpu__get_node((struct perf_cpu){.cpu = sample->cpu}),
-                   node2 = evsel__intval(evsel, sample, "node");
+               node1 = cpu__get_node((struct perf_cpu){.cpu = sample->cpu});
+               node2 = evsel__intval(evsel, sample, "node");
 
-               if (node1 != node2)
+               /*
+                * If the field "node" is NUMA_NO_NODE (-1), we don't take it
+                * as a cross allocation.
+                */
+               if ((node2 != NUMA_NO_NODE) && (node1 != node2))
                        nr_cross_allocs++;
        }
 
-       return ret;
+       return 0;
 }
 
 static int ptr_cmp(void *, void *);
@@ -1369,8 +1381,8 @@ static int __cmd_kmem(struct perf_session *session)
                /* slab allocator */
                { "kmem:kmalloc",               evsel__process_alloc_event, },
                { "kmem:kmem_cache_alloc",      evsel__process_alloc_event, },
-               { "kmem:kmalloc_node",          evsel__process_alloc_node_event, },
-               { "kmem:kmem_cache_alloc_node", evsel__process_alloc_node_event, },
+               { "kmem:kmalloc_node",          evsel__process_alloc_event, },
+               { "kmem:kmem_cache_alloc_node", evsel__process_alloc_event, },
                { "kmem:kfree",                 evsel__process_free_event, },
                { "kmem:kmem_cache_free",       evsel__process_free_event, },
                /* page allocator */
@@ -1824,6 +1836,19 @@ static int parse_line_opt(const struct option *opt __maybe_unused,
        return 0;
 }
 
+static bool slab_legacy_tp_is_exposed(void)
+{
+       /*
+        * The tracepoints "kmem:kmalloc_node" and
+        * "kmem:kmem_cache_alloc_node" have been removed on the latest
+        * kernel, if the tracepoint "kmem:kmalloc_node" is existed it
+        * means the tool is running on an old kernel, we need to
+        * rollback to support these legacy tracepoints.
+        */
+       return IS_ERR(trace_event__tp_format("kmem", "kmalloc_node")) ?
+               false : true;
+}
+
 static int __cmd_record(int argc, const char **argv)
 {
        const char * const record_args[] = {
@@ -1831,22 +1856,28 @@ static int __cmd_record(int argc, const char **argv)
        };
        const char * const slab_events[] = {
        "-e", "kmem:kmalloc",
-       "-e", "kmem:kmalloc_node",
        "-e", "kmem:kfree",
        "-e", "kmem:kmem_cache_alloc",
-       "-e", "kmem:kmem_cache_alloc_node",
        "-e", "kmem:kmem_cache_free",
        };
+       const char * const slab_legacy_events[] = {
+       "-e", "kmem:kmalloc_node",
+       "-e", "kmem:kmem_cache_alloc_node",
+       };
        const char * const page_events[] = {
        "-e", "kmem:mm_page_alloc",
        "-e", "kmem:mm_page_free",
        };
        unsigned int rec_argc, i, j;
        const char **rec_argv;
+       unsigned int slab_legacy_tp_exposed = slab_legacy_tp_is_exposed();
 
        rec_argc = ARRAY_SIZE(record_args) + argc - 1;
-       if (kmem_slab)
+       if (kmem_slab) {
                rec_argc += ARRAY_SIZE(slab_events);
+               if (slab_legacy_tp_exposed)
+                       rec_argc += ARRAY_SIZE(slab_legacy_events);
+       }
        if (kmem_page)
                rec_argc += ARRAY_SIZE(page_events) + 1; /* for -g */
 
@@ -1861,6 +1892,10 @@ static int __cmd_record(int argc, const char **argv)
        if (kmem_slab) {
                for (j = 0; j < ARRAY_SIZE(slab_events); j++, i++)
                        rec_argv[i] = strdup(slab_events[j]);
+               if (slab_legacy_tp_exposed) {
+                       for (j = 0; j < ARRAY_SIZE(slab_legacy_events); j++, i++)
+                               rec_argv[i] = strdup(slab_legacy_events[j]);
+               }
        }
        if (kmem_page) {
                rec_argv[i++] = strdup("-g");
index 718b82bfcdff951ba54ddca0f90c27d9380c752f..506c2fe42d523a873b0caecc45b7b0522fec12d0 100644 (file)
@@ -1670,6 +1670,7 @@ static int __cmd_report(bool display_info)
 
        /* for lock function check */
        symbol_conf.sort_by_name = true;
+       symbol_conf.allow_aliases = true;
        symbol__init(&session->header.env);
 
        if (!data.is_pipe) {
@@ -1757,6 +1758,7 @@ static int __cmd_contention(int argc, const char **argv)
 
        /* for lock function check */
        symbol_conf.sort_by_name = true;
+       symbol_conf.allow_aliases = true;
        symbol__init(&session->header.env);
 
        if (use_bpf) {
index 86e06f136f40221eeb3e22275dbae011bcb26d2e..d21fe0f32a6deb666d852a5ebb0692f318d3f253 100644 (file)
@@ -16,7 +16,9 @@
 
 #include "util/record.h"
 #include <api/fs/tracing_path.h>
+#ifdef HAVE_LIBBPF_SUPPORT
 #include <bpf/bpf.h>
+#endif
 #include "util/bpf_map.h"
 #include "util/rlimit.h"
 #include "builtin.h"
index 8fcab5ad00c5cc1cc499ee2ba8e986805921071c..e8d2762adade1c34a264e9b1b2fbf0ba6a1201db 100644 (file)
@@ -16,20 +16,20 @@ perf-ftrace                 mainporcelain common
 perf-inject                    mainporcelain common
 perf-iostat                    mainporcelain common
 perf-kallsyms                  mainporcelain common
-perf-kmem                      mainporcelain common
+perf-kmem                      mainporcelain traceevent
 perf-kvm                       mainporcelain common
-perf-kwork                     mainporcelain common
+perf-kwork                     mainporcelain traceevent
 perf-list                      mainporcelain common
-perf-lock                      mainporcelain common
+perf-lock                      mainporcelain traceevent
 perf-mem                       mainporcelain common
 perf-probe                     mainporcelain full
 perf-record                    mainporcelain common
 perf-report                    mainporcelain common
-perf-sched                     mainporcelain common
+perf-sched                     mainporcelain traceevent
 perf-script                    mainporcelain common
 perf-stat                      mainporcelain common
 perf-test                      mainporcelain common
-perf-timechart                 mainporcelain common
+perf-timechart                 mainporcelain traceevent
 perf-top                       mainporcelain common
 perf-trace                     mainporcelain audit
 perf-version                   mainporcelain common
index bd83d364cf30d6dab64fa2784c279ee589bf3dc8..91778b5c6125d0ca19d400ca2ebb5099fe37b5b3 100644 (file)
@@ -20,6 +20,8 @@
 # undef if
 #endif
 
+typedef unsigned int __bitwise fmode_t;
+
 #define FMODE_READ             0x1
 #define FMODE_WRITE            0x2
 
index 05e818a8bbad11a4f4bd27dec7f76f9390381f5e..009d6efb673ce828dd0762bf0d4ca1d4d4c9d0ae 100644 (file)
@@ -222,19 +222,7 @@ installed_files_bin := bin/perf
 installed_files_bin += etc/bash_completion.d/perf
 installed_files_bin += libexec/perf-core/perf-archive
 
-installed_files_plugins := $(lib)/traceevent/plugins/plugin_cfg80211.so
-installed_files_plugins += $(lib)/traceevent/plugins/plugin_scsi.so
-installed_files_plugins += $(lib)/traceevent/plugins/plugin_xen.so
-installed_files_plugins += $(lib)/traceevent/plugins/plugin_function.so
-installed_files_plugins += $(lib)/traceevent/plugins/plugin_sched_switch.so
-installed_files_plugins += $(lib)/traceevent/plugins/plugin_mac80211.so
-installed_files_plugins += $(lib)/traceevent/plugins/plugin_kvm.so
-installed_files_plugins += $(lib)/traceevent/plugins/plugin_kmem.so
-installed_files_plugins += $(lib)/traceevent/plugins/plugin_hrtimer.so
-installed_files_plugins += $(lib)/traceevent/plugins/plugin_jbd2.so
-
 installed_files_all := $(installed_files_bin)
-installed_files_all += $(installed_files_plugins)
 
 test_make_install       := $(call test_dest_files,$(installed_files_all))
 test_make_install_O     := $(call test_dest_files,$(installed_files_all))
index f05670d1e39eb93be6e8120f2f2ad3df8504a143..aaf851108ca35ef08682438643f241b16f1ee418 100755 (executable)
@@ -77,7 +77,20 @@ check()
        file=${build_id_dir}/.build-id/${id:0:2}/`readlink ${link}`/elf
        echo "file: ${file}"
 
-       if [ ! -x $file ]; then
+       # Check for file permission of original file
+       # in case of pe-file.exe file
+       echo $1 | grep ".exe"
+       if [ $? -eq 0 ]; then
+               if [ -x $1  -a ! -x $file ]; then
+                       echo "failed: file ${file} executable does not exist"
+                       exit 1
+               fi
+
+               if [ ! -x $file -a ! -e $file ]; then
+                       echo "failed: file ${file} does not exist"
+                       exit 1
+               fi
+       elif [ ! -x $file ]; then
                echo "failed: file ${file} does not exist"
                exit 1
        fi
index 34c400ccbe046b59813857d39ec6252fe404e1d3..57e7a6a470c93e3cd26a66ad98b4c88a81d7e806 100755 (executable)
@@ -37,6 +37,7 @@ trace_libc_inet_pton_backtrace() {
        case "$(uname -m)" in
        s390x)
                eventattr='call-graph=dwarf,max-stack=4'
+               echo "text_to_binary_address.*\+0x[[:xdigit:]]+[[:space:]]\($libc|inlined\)$" >> $expected
                echo "gaih_inet.*\+0x[[:xdigit:]]+[[:space:]]\($libc|inlined\)$" >> $expected
                echo "(__GI_)?getaddrinfo\+0x[[:xdigit:]]+[[:space:]]\($libc|inlined\)$" >> $expected
                echo "main\+0x[[:xdigit:]]+[[:space:]]\(.*/bin/ping.*\)$" >> $expected
@@ -57,7 +58,7 @@ trace_libc_inet_pton_backtrace() {
        perf_data=`mktemp -u /tmp/perf.data.XXX`
        perf_script=`mktemp -u /tmp/perf.script.XXX`
        perf record -e $event_name/$eventattr/ -o $perf_data ping -6 -c 1 ::1 > /dev/null 2>&1
-       perf script -i $perf_data > $perf_script
+       perf script -i $perf_data | tac | grep -m1 ^ping -B9 | tac > $perf_script
 
        exec 3<$perf_script
        exec 4<$expected
index de3701a2a2129dd6357f1910ec4878cb1005eaf9..13c3a237b9c934a534aa4a8513fea5e34864528d 100644 (file)
@@ -33,7 +33,10 @@ typedef __kernel_sa_family_t sa_family_t;
 
 struct sockaddr {
        sa_family_t     sa_family;      /* address family, AF_xxx       */
-       char            sa_data[14];    /* 14 bytes of protocol address */
+       union {
+               char sa_data_min[14];           /* Minimum 14 bytes of protocol address */
+               DECLARE_FLEX_ARRAY(char, sa_data);
+       };
 };
 
 struct linger {
index 3cc42821d9b397e379a007f98a110bf385a03034..d7dc7c28508c0aea7b2653a7bf0d48b850358842 100755 (executable)
@@ -19,7 +19,7 @@ TAG=
 if test -d ../../.git -o -f ../../.git
 then
        TAG=$(MAKEFLAGS= make -sC ../.. kernelversion)
-       CID=$(git log -1 --abbrev=12 --pretty=format:"%h" 2>/dev/null) && CID="-g$CID"
+       CID=$(git log -1 --abbrev=12 --pretty=format:"%h" --no-show-signature 2>/dev/null) && CID="-g$CID"
 elif test -f ../../PERF-VERSION-FILE
 then
        TAG=$(cut -d' ' -f3 ../../PERF-VERSION-FILE | sed -e 's/\"//g')
index 265d20cc126b13b09457d72c97d11bd22576cb77..c2e323cd7d496c737024c15b930756c80eb2b542 100644 (file)
@@ -2611,7 +2611,7 @@ static int find_dso_sym(struct dso *dso, const char *sym_name, u64 *start,
                                *size = sym->start - *start;
                        if (idx > 0) {
                                if (*size)
-                                       return 1;
+                                       return 0;
                        } else if (dso_sym_match(sym, sym_name, &cnt, idx)) {
                                print_duplicate_syms(dso, sym_name);
                                return -EINVAL;
index 4dbf26408b692a72c9d9e15cef1121188b659ca5..c6d21c07b14cd88c5e3c20c1292677e2eb868c8a 100644 (file)
@@ -4,9 +4,12 @@
 
 #include <linux/list.h>
 #include <sys/resource.h>
+
+#ifdef HAVE_LIBBPF_SUPPORT
 #include <bpf/bpf.h>
 #include <bpf/btf.h>
 #include <bpf/libbpf.h>
+#endif
 
 struct evsel;
 struct target;
@@ -87,6 +90,8 @@ static inline void set_max_rlimit(void)
        setrlimit(RLIMIT_MEMLOCK, &rinf);
 }
 
+#ifdef HAVE_BPF_SKEL
+
 static inline __u32 bpf_link_get_id(int fd)
 {
        struct bpf_link_info link_info = { .id = 0, };
@@ -127,5 +132,6 @@ static inline int bperf_trigger_reading(int prog_fd, int cpu)
 
        return bpf_prog_test_run_opts(prog_fd, &opts);
 }
+#endif /* HAVE_BPF_SKEL */
 
 #endif /* __PERF_BPF_COUNTER_H */
index 3c2df7522f6fcbdc63a918a6d322cd2c3afbcf56..1c82377ed78b99941df27d86afc8579ea5bfdf01 100644 (file)
@@ -116,27 +116,19 @@ static int bperf_load_program(struct evlist *evlist)
 
                        /* open single copy of the events w/o cgroup */
                        err = evsel__open_per_cpu(evsel, evsel->core.cpus, -1);
-                       if (err) {
-                               pr_err("Failed to open first cgroup events\n");
-                               goto out;
-                       }
+                       if (err == 0)
+                               evsel->supported = true;
 
                        map_fd = bpf_map__fd(skel->maps.events);
                        perf_cpu_map__for_each_cpu(cpu, j, evsel->core.cpus) {
                                int fd = FD(evsel, j);
                                __u32 idx = evsel->core.idx * total_cpus + cpu.cpu;
 
-                               err = bpf_map_update_elem(map_fd, &idx, &fd,
-                                                         BPF_ANY);
-                               if (err < 0) {
-                                       pr_err("Failed to update perf_event fd\n");
-                                       goto out;
-                               }
+                               bpf_map_update_elem(map_fd, &idx, &fd, BPF_ANY);
                        }
 
                        evsel->cgrp = leader_cgrp;
                }
-               evsel->supported = true;
 
                if (evsel->cgrp == cgrp)
                        continue;
index a839b30c981b7b07bcbd2993970bea98131938b8..ea9c083ab1e3fa65f84702b69f77d56213191cd7 100644 (file)
@@ -715,9 +715,13 @@ build_id_cache__add(const char *sbuild_id, const char *name, const char *realnam
                } else if (nsi && nsinfo__need_setns(nsi)) {
                        if (copyfile_ns(name, filename, nsi))
                                goto out_free;
-               } else if (link(realname, filename) && errno != EEXIST &&
-                               copyfile(name, filename))
-                       goto out_free;
+               } else if (link(realname, filename) && errno != EEXIST) {
+                       struct stat f_stat;
+
+                       if (!(stat(name, &f_stat) < 0) &&
+                                       copyfile_mode(name, filename, f_stat.st_mode))
+                               goto out_free;
+               }
        }
 
        /* Some binaries are stripped, but have .debug files with their symbol
index e99b41f9be45a401b9e34d24f14d602043dd4924..cd978c240e0ddbedbd0eaffec61458b75dc4ba27 100644 (file)
@@ -224,6 +224,19 @@ static int add_cgroup_name(const char *fpath, const struct stat *sb __maybe_unus
        return 0;
 }
 
+static int check_and_add_cgroup_name(const char *fpath)
+{
+       struct cgroup_name *cn;
+
+       list_for_each_entry(cn, &cgroup_list, list) {
+               if (!strcmp(cn->name, fpath))
+                       return 0;
+       }
+
+       /* pretend if it's added by ftw() */
+       return add_cgroup_name(fpath, NULL, FTW_D, NULL);
+}
+
 static void release_cgroup_list(void)
 {
        struct cgroup_name *cn;
@@ -242,7 +255,7 @@ static int list_cgroups(const char *str)
        struct cgroup_name *cn;
        char *s;
 
-       /* use given name as is - for testing purpose */
+       /* use given name as is when no regex is given */
        for (;;) {
                p = strchr(str, ',');
                e = p ? p : eos;
@@ -253,13 +266,13 @@ static int list_cgroups(const char *str)
                        s = strndup(str, e - str);
                        if (!s)
                                return -1;
-                       /* pretend if it's added by ftw() */
-                       ret = add_cgroup_name(s, NULL, FTW_D, NULL);
+
+                       ret = check_and_add_cgroup_name(s);
                        free(s);
-                       if (ret)
+                       if (ret < 0)
                                return -1;
                } else {
-                       if (add_cgroup_name("", NULL, FTW_D, NULL) < 0)
+                       if (check_and_add_cgroup_name("/") < 0)
                                return -1;
                }
 
index a7f68c309545de236bdbfaf816a5d00f56a7617b..fc16299c915f9b51c79f79a77307c35b8dabbe57 100644 (file)
@@ -132,6 +132,7 @@ int perf_data__open_dir(struct perf_data *data)
                file->size = st.st_size;
        }
 
+       closedir(dir);
        if (!files)
                return -EINVAL;
 
@@ -140,6 +141,7 @@ int perf_data__open_dir(struct perf_data *data)
        return 0;
 
 out_err:
+       closedir(dir);
        close_dir(files, nr);
        return ret;
 }
index 0168a96373309b712002fa91e40d6efe061813c0..d47de5f270a8e94e0f977a9d2fbe43713b4e5b21 100644 (file)
@@ -42,8 +42,11 @@ static char *normalize(char *str, int runtime)
        char *dst = str;
 
        while (*str) {
-               if (*str == '\\')
+               if (*str == '\\') {
                        *dst++ = *++str;
+                       if (!*str)
+                               break;
+               }
                else if (*str == '?') {
                        char *paramval;
                        int i = 0;
index c3cef36d417683581ed7405ac842854057f8ff7b..1b5140e5ce9975fac87b2674dc694f9d4e439a5f 100755 (executable)
@@ -38,7 +38,7 @@ do
 done
 echo "#endif /* HAVE_LIBELF_SUPPORT */"
 
-echo "#if defined(HAVE_LIBAUDIT_SUPPORT) || defined(HAVE_SYSCALL_TABLE_SUPPORT)"
+echo "#if defined(HAVE_LIBTRACEEVENT) && (defined(HAVE_LIBAUDIT_SUPPORT) || defined(HAVE_SYSCALL_TABLE_SUPPORT))"
 sed -n -e 's/^perf-\([^        ]*\)[   ].* audit*/\1/p' command-list.txt |
 sort |
 while read cmd
@@ -51,5 +51,20 @@ do
            p
      }' "Documentation/perf-$cmd.txt"
 done
-echo "#endif /* HAVE_LIBELF_SUPPORT */"
+echo "#endif /* HAVE_LIBTRACEEVENT && (HAVE_LIBAUDIT_SUPPORT || HAVE_SYSCALL_TABLE_SUPPORT) */"
+
+echo "#ifdef HAVE_LIBTRACEEVENT"
+sed -n -e 's/^perf-\([^        ]*\)[   ].* traceevent.*/\1/p' command-list.txt |
+sort |
+while read cmd
+do
+     sed -n '
+     /^NAME/,/perf-'"$cmd"'/H
+     ${
+            x
+            s/.*perf-'"$cmd"' - \(.*\)/  {"'"$cmd"'", "\1"},/
+            p
+     }' "Documentation/perf-$cmd.txt"
+done
+echo "#endif /* HAVE_LIBTRACEEVENT */"
 echo "};"
index e188f74698dd310af74a6029392766e5b9c1d99c..37662cdec5eefd183bd95c2c277777294a1046bb 100644 (file)
@@ -2971,6 +2971,18 @@ static int add_dynamic_entry(struct evlist *evlist, const char *tok,
                ret = add_all_matching_fields(evlist, field_name, raw_trace, level);
                goto out;
        }
+#else
+       evlist__for_each_entry(evlist, evsel) {
+               if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT) {
+                       pr_err("%s %s", ret ? "," : "This perf binary isn't linked with libtraceevent, can't process", evsel__name(evsel));
+                       ret = -ENOTSUP;
+               }
+       }
+
+       if (ret) {
+               pr_err("\n");
+               goto out;
+       }
 #endif
 
        evsel = find_evsel(evlist, event_name);
index add6c5d9531cdf91253dd01b9bb4b18c5d3951f5..9b3cd79cca1217951f3c892143f0e71d541d4a62 100644 (file)
@@ -4,6 +4,7 @@
 
 #include <stdbool.h>
 #include <stdio.h>
+#include <sys/types.h>
 #include <linux/types.h>
 
 struct evlist;
index 654338e0be52eb5b0dfd1748c415707341b21ce2..4cc7cd5aac2b10a7ccbf5278b2773a339ee3b9f2 100644 (file)
@@ -1,4 +1,5 @@
 main
 memblock.c
 linux/memblock.h
+asm/asm.h
 asm/cmpxchg.h
index 2310ac4d080ec68fdb7582d85da486643b93492a..7a1ca694a9825c4ea8fc19e85945b3627bb2d75f 100644 (file)
@@ -29,13 +29,14 @@ include: ../../../include/linux/memblock.h ../../include/linux/*.h \
 
        @mkdir -p linux
        test -L linux/memblock.h || ln -s ../../../../include/linux/memblock.h linux/memblock.h
+       test -L asm/asm.h || ln -s ../../../arch/x86/include/asm/asm.h asm/asm.h
        test -L asm/cmpxchg.h || ln -s ../../../arch/x86/include/asm/cmpxchg.h asm/cmpxchg.h
 
 memblock.c: $(EXTR_SRC)
        test -L memblock.c || ln -s $(EXTR_SRC) memblock.c
 
 clean:
-       $(RM) $(TARGETS) $(OFILES) linux/memblock.h memblock.c asm/cmpxchg.h
+       $(RM) $(TARGETS) $(OFILES) linux/memblock.h memblock.c asm/asm.h asm/cmpxchg.h
 
 help:
        @echo  'Memblock simulator'
index fdb7f5db730822e93d7732026218609cda4a054c..85973e55489e75f1d7e66199f1b616dd299c46bd 100644 (file)
@@ -15,6 +15,10 @@ bool mirrored_kernelcore = false;
 
 struct page {};
 
+void __free_pages_core(struct page *page, unsigned int order)
+{
+}
+
 void memblock_free_pages(struct page *page, unsigned long pfn,
                         unsigned int order)
 {
index 07d2d0a8c5cb4dae24dedd1e3b894d2dfa294197..401a75844cc0e71ce9fa4464433b5a45f4f38517 100644 (file)
@@ -36,6 +36,7 @@ test_cpp
 *.lskel.h
 /no_alu32
 /bpf_gcc
+/host-tools
 /tools
 /runqslower
 /bench
index 585fcf73c73146db5e8375a7ddc6eeddd43a5189..3fc3e54b19aad4d94b114f46532a8af3038f07f4 100644 (file)
@@ -14,6 +14,7 @@ cgrp_kfunc                               # JIT does not support calling kernel f
 cgrp_local_storage                       # prog_attach unexpected error: -524                                          (trampoline)
 core_read_macros                         # unknown func bpf_probe_read#4                                               (overlapping)
 d_path                                   # failed to auto-attach program 'prog_stat': -524                             (trampoline)
+decap_sanity                             # JIT does not support calling kernel function                                (kfunc)
 deny_namespace                           # failed to attach: ERROR: strerror_r(-524)=22                                (trampoline)
 dummy_st_ops                             # test_run unexpected error: -524 (errno 524)                                 (trampoline)
 fentry_fexit                             # fentry attach failed: -524                                                  (trampoline)
index 6f8ed61fc4b4a8696779afa8e746d9ed6541e638..3af6450763e9ac946c8b7f83f5cb509ced004b6e 100644 (file)
@@ -1465,6 +1465,77 @@ out:
        bpf_iter_task_vma__destroy(skel);
 }
 
+static void test_task_vma_dead_task(void)
+{
+       struct bpf_iter_task_vma *skel;
+       int wstatus, child_pid = -1;
+       time_t start_tm, cur_tm;
+       int err, iter_fd = -1;
+       int wait_sec = 3;
+
+       skel = bpf_iter_task_vma__open();
+       if (!ASSERT_OK_PTR(skel, "bpf_iter_task_vma__open"))
+               return;
+
+       skel->bss->pid = getpid();
+
+       err = bpf_iter_task_vma__load(skel);
+       if (!ASSERT_OK(err, "bpf_iter_task_vma__load"))
+               goto out;
+
+       skel->links.proc_maps = bpf_program__attach_iter(
+               skel->progs.proc_maps, NULL);
+
+       if (!ASSERT_OK_PTR(skel->links.proc_maps, "bpf_program__attach_iter")) {
+               skel->links.proc_maps = NULL;
+               goto out;
+       }
+
+       start_tm = time(NULL);
+       cur_tm = start_tm;
+
+       child_pid = fork();
+       if (child_pid == 0) {
+               /* Fork short-lived processes in the background. */
+               while (cur_tm < start_tm + wait_sec) {
+                       system("echo > /dev/null");
+                       cur_tm = time(NULL);
+               }
+               exit(0);
+       }
+
+       if (!ASSERT_GE(child_pid, 0, "fork_child"))
+               goto out;
+
+       while (cur_tm < start_tm + wait_sec) {
+               iter_fd = bpf_iter_create(bpf_link__fd(skel->links.proc_maps));
+               if (!ASSERT_GE(iter_fd, 0, "create_iter"))
+                       goto out;
+
+               /* Drain all data from iter_fd. */
+               while (cur_tm < start_tm + wait_sec) {
+                       err = read_fd_into_buffer(iter_fd, task_vma_output, CMP_BUFFER_SIZE);
+                       if (!ASSERT_GE(err, 0, "read_iter_fd"))
+                               goto out;
+
+                       cur_tm = time(NULL);
+
+                       if (err == 0)
+                               break;
+               }
+
+               close(iter_fd);
+               iter_fd = -1;
+       }
+
+       check_bpf_link_info(skel->progs.proc_maps);
+
+out:
+       waitpid(child_pid, &wstatus, 0);
+       close(iter_fd);
+       bpf_iter_task_vma__destroy(skel);
+}
+
 void test_bpf_sockmap_map_iter_fd(void)
 {
        struct bpf_iter_sockmap *skel;
@@ -1586,6 +1657,8 @@ void test_bpf_iter(void)
                test_task_file();
        if (test__start_subtest("task_vma"))
                test_task_vma();
+       if (test__start_subtest("task_vma_dead_task"))
+               test_task_vma_dead_task();
        if (test__start_subtest("task_btf"))
                test_task_btf();
        if (test__start_subtest("tcp4"))
index 0ba2e8b9c6ace7759f6866dd20d6a10519e1abc3..e9ea38aa8248bed1859080d1ba9da9ca63727f01 100644 (file)
@@ -801,7 +801,7 @@ static void test_btf_dump_struct_data(struct btf *btf, struct btf_dump *d,
 static void test_btf_dump_var_data(struct btf *btf, struct btf_dump *d,
                                   char *str)
 {
-#if defined(__i386__) || defined(__x86_64__) || defined(__aarch64__)
+#if 0
        TEST_BTF_DUMP_VAR(btf, d, NULL, str, "cpu_number", int, BTF_F_COMPACT,
                          "int cpu_number = (int)100", 100);
 #endif
diff --git a/tools/testing/selftests/bpf/prog_tests/decap_sanity.c b/tools/testing/selftests/bpf/prog_tests/decap_sanity.c
new file mode 100644 (file)
index 0000000..0b2f73b
--- /dev/null
@@ -0,0 +1,85 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <net/if.h>
+#include <linux/in6.h>
+
+#include "test_progs.h"
+#include "network_helpers.h"
+#include "decap_sanity.skel.h"
+
+#define SYS(fmt, ...)                                          \
+       ({                                                      \
+               char cmd[1024];                                 \
+               snprintf(cmd, sizeof(cmd), fmt, ##__VA_ARGS__); \
+               if (!ASSERT_OK(system(cmd), cmd))               \
+                       goto fail;                              \
+       })
+
+#define NS_TEST "decap_sanity_ns"
+#define IPV6_IFACE_ADDR "face::1"
+#define UDP_TEST_PORT 7777
+
+void test_decap_sanity(void)
+{
+       LIBBPF_OPTS(bpf_tc_hook, qdisc_hook, .attach_point = BPF_TC_EGRESS);
+       LIBBPF_OPTS(bpf_tc_opts, tc_attach);
+       struct nstoken *nstoken = NULL;
+       struct decap_sanity *skel;
+       struct sockaddr_in6 addr;
+       socklen_t addrlen;
+       char buf[128] = {};
+       int sockfd, err;
+
+       skel = decap_sanity__open_and_load();
+       if (!ASSERT_OK_PTR(skel, "skel open_and_load"))
+               return;
+
+       SYS("ip netns add %s", NS_TEST);
+       SYS("ip -net %s -6 addr add %s/128 dev lo nodad", NS_TEST, IPV6_IFACE_ADDR);
+       SYS("ip -net %s link set dev lo up", NS_TEST);
+
+       nstoken = open_netns(NS_TEST);
+       if (!ASSERT_OK_PTR(nstoken, "open_netns"))
+               goto fail;
+
+       qdisc_hook.ifindex = if_nametoindex("lo");
+       if (!ASSERT_GT(qdisc_hook.ifindex, 0, "if_nametoindex lo"))
+               goto fail;
+
+       err = bpf_tc_hook_create(&qdisc_hook);
+       if (!ASSERT_OK(err, "create qdisc hook"))
+               goto fail;
+
+       tc_attach.prog_fd = bpf_program__fd(skel->progs.decap_sanity);
+       err = bpf_tc_attach(&qdisc_hook, &tc_attach);
+       if (!ASSERT_OK(err, "attach filter"))
+               goto fail;
+
+       addrlen = sizeof(addr);
+       err = make_sockaddr(AF_INET6, IPV6_IFACE_ADDR, UDP_TEST_PORT,
+                           (void *)&addr, &addrlen);
+       if (!ASSERT_OK(err, "make_sockaddr"))
+               goto fail;
+       sockfd = socket(AF_INET6, SOCK_DGRAM, 0);
+       if (!ASSERT_NEQ(sockfd, -1, "socket"))
+               goto fail;
+       err = sendto(sockfd, buf, sizeof(buf), 0, (void *)&addr, addrlen);
+       close(sockfd);
+       if (!ASSERT_EQ(err, sizeof(buf), "send"))
+               goto fail;
+
+       ASSERT_TRUE(skel->bss->init_csum_partial, "init_csum_partial");
+       ASSERT_TRUE(skel->bss->final_csum_none, "final_csum_none");
+       ASSERT_FALSE(skel->bss->broken_csum_start, "broken_csum_start");
+
+fail:
+       if (nstoken) {
+               bpf_tc_hook_destroy(&qdisc_hook);
+               close_netns(nstoken);
+       }
+       system("ip netns del " NS_TEST " >& /dev/null");
+       decap_sanity__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/jeq_infer_not_null.c b/tools/testing/selftests/bpf/prog_tests/jeq_infer_not_null.c
new file mode 100644 (file)
index 0000000..3add34d
--- /dev/null
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <test_progs.h>
+#include "jeq_infer_not_null_fail.skel.h"
+
+void test_jeq_infer_not_null(void)
+{
+       RUN_TESTS(jeq_infer_not_null_fail);
+}
index b394817126cf9e5fe12afd30fc4ede4733522ea6..cfed4df490f35b23d808c6ae427d79ab2f3cee58 100644 (file)
 #define ICSK_TIME_LOSS_PROBE   5
 #define ICSK_TIME_REO_TIMEOUT  6
 
+#define ETH_HLEN               14
+#define ETH_P_IPV6             0x86DD
+
+#define CHECKSUM_NONE          0
+#define CHECKSUM_PARTIAL       3
+
 #define IFNAMSIZ               16
 
 #define RTF_GATEWAY            0x0002
diff --git a/tools/testing/selftests/bpf/progs/decap_sanity.c b/tools/testing/selftests/bpf/progs/decap_sanity.c
new file mode 100644 (file)
index 0000000..bd3c657
--- /dev/null
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+
+#include "vmlinux.h"
+#include "bpf_tracing_net.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+
+#define UDP_TEST_PORT 7777
+
+void *bpf_cast_to_kern_ctx(void *) __ksym;
+bool init_csum_partial = false;
+bool final_csum_none = false;
+bool broken_csum_start = false;
+
+static unsigned int skb_headlen(const struct sk_buff *skb)
+{
+       return skb->len - skb->data_len;
+}
+
+static unsigned int skb_headroom(const struct sk_buff *skb)
+{
+       return skb->data - skb->head;
+}
+
+static int skb_checksum_start_offset(const struct sk_buff *skb)
+{
+       return skb->csum_start - skb_headroom(skb);
+}
+
+SEC("tc")
+int decap_sanity(struct __sk_buff *skb)
+{
+       struct sk_buff *kskb;
+       struct ipv6hdr ip6h;
+       struct udphdr udph;
+       int err;
+
+       if (skb->protocol != __bpf_constant_htons(ETH_P_IPV6))
+               return TC_ACT_SHOT;
+
+       if (bpf_skb_load_bytes(skb, ETH_HLEN, &ip6h, sizeof(ip6h)))
+               return TC_ACT_SHOT;
+
+       if (ip6h.nexthdr != IPPROTO_UDP)
+               return TC_ACT_SHOT;
+
+       if (bpf_skb_load_bytes(skb, ETH_HLEN + sizeof(ip6h), &udph, sizeof(udph)))
+               return TC_ACT_SHOT;
+
+       if (udph.dest != __bpf_constant_htons(UDP_TEST_PORT))
+               return TC_ACT_SHOT;
+
+       kskb = bpf_cast_to_kern_ctx(skb);
+       init_csum_partial = (kskb->ip_summed == CHECKSUM_PARTIAL);
+       err = bpf_skb_adjust_room(skb, -(s32)(ETH_HLEN + sizeof(ip6h) + sizeof(udph)),
+                                 1, BPF_F_ADJ_ROOM_FIXED_GSO);
+       if (err)
+               return TC_ACT_SHOT;
+       final_csum_none = (kskb->ip_summed == CHECKSUM_NONE);
+       if (kskb->ip_summed == CHECKSUM_PARTIAL &&
+           (unsigned int)skb_checksum_start_offset(kskb) >= skb_headlen(kskb))
+               broken_csum_start = true;
+
+       return TC_ACT_SHOT;
+}
+
+char __license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/jeq_infer_not_null_fail.c b/tools/testing/selftests/bpf/progs/jeq_infer_not_null_fail.c
new file mode 100644 (file)
index 0000000..f469650
--- /dev/null
@@ -0,0 +1,42 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+char _license[] SEC("license") = "GPL";
+
+struct {
+       __uint(type, BPF_MAP_TYPE_HASH);
+       __uint(max_entries, 1);
+       __type(key, u64);
+       __type(value, u64);
+} m_hash SEC(".maps");
+
+SEC("?raw_tp")
+__failure __msg("R8 invalid mem access 'map_value_or_null")
+int jeq_infer_not_null_ptr_to_btfid(void *ctx)
+{
+       struct bpf_map *map = (struct bpf_map *)&m_hash;
+       struct bpf_map *inner_map = map->inner_map_meta;
+       u64 key = 0, ret = 0, *val;
+
+       val = bpf_map_lookup_elem(map, &key);
+       /* Do not mark ptr as non-null if one of them is
+        * PTR_TO_BTF_ID (R9), reject because of invalid
+        * access to map value (R8).
+        *
+        * Here, we need to inline those insns to access
+        * R8 directly, since compiler may use other reg
+        * once it figures out val==inner_map.
+        */
+       asm volatile("r8 = %[val];\n"
+                    "r9 = %[inner_map];\n"
+                    "if r8 != r9 goto +1;\n"
+                    "%[ret] = *(u64 *)(r8 +0);\n"
+                    : [ret] "+r"(ret)
+                    : [inner_map] "r"(inner_map), [val] "r"(val)
+                    : "r8", "r9");
+
+       return ret;
+}
index 6ce8c488d62eacd620181f5d7b5bfb89b23efcb7..6d9381d60172f39e9557f76179e420e69209d358 100644 (file)
@@ -1,86 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0-only
-/aarch64/aarch32_id_regs
-/aarch64/arch_timer
-/aarch64/debug-exceptions
-/aarch64/get-reg-list
-/aarch64/hypercalls
-/aarch64/page_fault_test
-/aarch64/psci_test
-/aarch64/vcpu_width_config
-/aarch64/vgic_init
-/aarch64/vgic_irq
-/s390x/memop
-/s390x/resets
-/s390x/sync_regs_test
-/s390x/tprot
-/x86_64/amx_test
-/x86_64/cpuid_test
-/x86_64/cr4_cpuid_sync_test
-/x86_64/debug_regs
-/x86_64/exit_on_emulation_failure_test
-/x86_64/fix_hypercall_test
-/x86_64/get_msr_index_features
-/x86_64/kvm_clock_test
-/x86_64/kvm_pv_test
-/x86_64/hyperv_clock
-/x86_64/hyperv_cpuid
-/x86_64/hyperv_evmcs
-/x86_64/hyperv_features
-/x86_64/hyperv_ipi
-/x86_64/hyperv_svm_test
-/x86_64/hyperv_tlb_flush
-/x86_64/max_vcpuid_cap_test
-/x86_64/mmio_warning_test
-/x86_64/monitor_mwait_test
-/x86_64/nested_exceptions_test
-/x86_64/nx_huge_pages_test
-/x86_64/platform_info_test
-/x86_64/pmu_event_filter_test
-/x86_64/set_boot_cpu_id
-/x86_64/set_sregs_test
-/x86_64/sev_migrate_tests
-/x86_64/smaller_maxphyaddr_emulation_test
-/x86_64/smm_test
-/x86_64/state_test
-/x86_64/svm_vmcall_test
-/x86_64/svm_int_ctl_test
-/x86_64/svm_nested_soft_inject_test
-/x86_64/svm_nested_shutdown_test
-/x86_64/sync_regs_test
-/x86_64/tsc_msrs_test
-/x86_64/tsc_scaling_sync
-/x86_64/ucna_injection_test
-/x86_64/userspace_io_test
-/x86_64/userspace_msr_exit_test
-/x86_64/vmx_apic_access_test
-/x86_64/vmx_close_while_nested_test
-/x86_64/vmx_dirty_log_test
-/x86_64/vmx_exception_with_invalid_guest_state
-/x86_64/vmx_invalid_nested_guest_state
-/x86_64/vmx_msrs_test
-/x86_64/vmx_preemption_timer_test
-/x86_64/vmx_set_nested_state_test
-/x86_64/vmx_tsc_adjust_test
-/x86_64/vmx_nested_tsc_scaling_test
-/x86_64/xapic_ipi_test
-/x86_64/xapic_state_test
-/x86_64/xen_shinfo_test
-/x86_64/xen_vmcall_test
-/x86_64/xss_msr_test
-/x86_64/vmx_pmu_caps_test
-/x86_64/triple_fault_event_test
-/access_tracking_perf_test
-/demand_paging_test
-/dirty_log_test
-/dirty_log_perf_test
-/hardware_disable_test
-/kvm_create_max_vcpus
-/kvm_page_table_test
-/max_guest_memory_test
-/memslot_modification_stress_test
-/memslot_perf_test
-/rseq_test
-/set_memory_region_test
-/steal_time
-/kvm_binary_stats_test
-/system_counter_offset_test
+*
+!/**/
+!*.c
+!*.h
+!*.S
+!*.sh
index 947676983da1f2bff7102d66e18450861a6a97e9..1750f91dd936291a524cc89859a09ea0c1aea46b 100644 (file)
@@ -7,35 +7,14 @@ top_srcdir = ../../../..
 include $(top_srcdir)/scripts/subarch.include
 ARCH            ?= $(SUBARCH)
 
-# For cross-builds to work, UNAME_M has to map to ARCH and arch specific
-# directories and targets in this Makefile. "uname -m" doesn't map to
-# arch specific sub-directory names.
-#
-# UNAME_M variable to used to run the compiles pointing to the right arch
-# directories and build the right targets for these supported architectures.
-#
-# TEST_GEN_PROGS and LIBKVM are set using UNAME_M variable.
-# LINUX_TOOL_ARCH_INCLUDE is set using ARCH variable.
-#
-# x86_64 targets are named to include x86_64 as a suffix and directories
-# for includes are in x86_64 sub-directory. s390x and aarch64 follow the
-# same convention. "uname -m" doesn't result in the correct mapping for
-# s390x and aarch64.
-#
-# No change necessary for x86_64
-UNAME_M := $(shell uname -m)
-
-# Set UNAME_M for arm64 compile/install to work
-ifeq ($(ARCH),arm64)
-       UNAME_M := aarch64
-endif
-# Set UNAME_M s390x compile/install to work
-ifeq ($(ARCH),s390)
-       UNAME_M := s390x
-endif
-# Set UNAME_M riscv compile/install to work
-ifeq ($(ARCH),riscv)
-       UNAME_M := riscv
+ifeq ($(ARCH),x86)
+       ARCH_DIR := x86_64
+else ifeq ($(ARCH),arm64)
+       ARCH_DIR := aarch64
+else ifeq ($(ARCH),s390)
+       ARCH_DIR := s390x
+else
+       ARCH_DIR := $(ARCH)
 endif
 
 LIBKVM += lib/assert.c
@@ -196,10 +175,15 @@ TEST_GEN_PROGS_riscv += kvm_page_table_test
 TEST_GEN_PROGS_riscv += set_memory_region_test
 TEST_GEN_PROGS_riscv += kvm_binary_stats_test
 
-TEST_PROGS += $(TEST_PROGS_$(UNAME_M))
-TEST_GEN_PROGS += $(TEST_GEN_PROGS_$(UNAME_M))
-TEST_GEN_PROGS_EXTENDED += $(TEST_GEN_PROGS_EXTENDED_$(UNAME_M))
-LIBKVM += $(LIBKVM_$(UNAME_M))
+TEST_PROGS += $(TEST_PROGS_$(ARCH_DIR))
+TEST_GEN_PROGS += $(TEST_GEN_PROGS_$(ARCH_DIR))
+TEST_GEN_PROGS_EXTENDED += $(TEST_GEN_PROGS_EXTENDED_$(ARCH_DIR))
+LIBKVM += $(LIBKVM_$(ARCH_DIR))
+
+# lib.mak defines $(OUTPUT), prepends $(OUTPUT)/ to $(TEST_GEN_PROGS), and most
+# importantly defines, i.e. overwrites, $(CC) (unless `make -e` or `make CC=`,
+# which causes the environment variable to override the makefile).
+include ../lib.mk
 
 INSTALL_HDR_PATH = $(top_srcdir)/usr
 LINUX_HDR_PATH = $(INSTALL_HDR_PATH)/include/
@@ -210,25 +194,23 @@ else
 LINUX_TOOL_ARCH_INCLUDE = $(top_srcdir)/tools/arch/$(ARCH)/include
 endif
 CFLAGS += -Wall -Wstrict-prototypes -Wuninitialized -O2 -g -std=gnu99 \
+       -Wno-gnu-variable-sized-type-not-at-end \
+       -fno-builtin-memcmp -fno-builtin-memcpy -fno-builtin-memset \
        -fno-stack-protector -fno-PIE -I$(LINUX_TOOL_INCLUDE) \
        -I$(LINUX_TOOL_ARCH_INCLUDE) -I$(LINUX_HDR_PATH) -Iinclude \
-       -I$(<D) -Iinclude/$(UNAME_M) -I ../rseq -I.. $(EXTRA_CFLAGS) \
+       -I$(<D) -Iinclude/$(ARCH_DIR) -I ../rseq -I.. $(EXTRA_CFLAGS) \
        $(KHDR_INCLUDES)
 
-no-pie-option := $(call try-run, echo 'int main() { return 0; }' | \
-        $(CC) -Werror -no-pie -x c - -o "$$TMP", -no-pie)
+no-pie-option := $(call try-run, echo 'int main(void) { return 0; }' | \
+        $(CC) -Werror $(CFLAGS) -no-pie -x c - -o "$$TMP", -no-pie)
 
 # On s390, build the testcases KVM-enabled
-pgste-option = $(call try-run, echo 'int main() { return 0; }' | \
+pgste-option = $(call try-run, echo 'int main(void) { return 0; }' | \
        $(CC) -Werror -Wl$(comma)--s390-pgste -x c - -o "$$TMP",-Wl$(comma)--s390-pgste)
 
 LDLIBS += -ldl
 LDFLAGS += -pthread $(no-pie-option) $(pgste-option)
 
-# After inclusion, $(OUTPUT) is defined and
-# $(TEST_GEN_PROGS) starts with $(OUTPUT)/
-include ../lib.mk
-
 LIBKVM_C := $(filter %.c,$(LIBKVM))
 LIBKVM_S := $(filter %.S,$(LIBKVM))
 LIBKVM_C_OBJ := $(patsubst %.c, $(OUTPUT)/%.o, $(LIBKVM_C))
index 95d22cfb7b41a2cf72ffd66b1113856250946eb9..beb944fa6fd4692fddad8875ef16fb2469c8ab74 100644 (file)
@@ -117,7 +117,7 @@ static void guest_cas(void)
        GUEST_ASSERT(guest_check_lse());
        asm volatile(".arch_extension lse\n"
                     "casal %0, %1, [%2]\n"
-                    :: "r" (0), "r" (TEST_DATA), "r" (guest_test_memory));
+                    :: "r" (0ul), "r" (TEST_DATA), "r" (guest_test_memory));
        val = READ_ONCE(*guest_test_memory);
        GUEST_ASSERT_EQ(val, TEST_DATA);
 }
index 562c16dfbb002322951f10820d68624edebc94e6..f212bd8ab93d80cce19a2a25d8a2162b3d145a4d 100644 (file)
@@ -14,11 +14,13 @@ static vm_vaddr_t *ucall_exit_mmio_addr;
 
 void ucall_arch_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa)
 {
-       virt_pg_map(vm, mmio_gpa, mmio_gpa);
+       vm_vaddr_t mmio_gva = vm_vaddr_unused_gap(vm, vm->page_size, KVM_UTIL_MIN_VADDR);
+
+       virt_map(vm, mmio_gva, mmio_gpa, 1);
 
        vm->ucall_mmio_addr = mmio_gpa;
 
-       write_guest_global(vm, ucall_exit_mmio_addr, (vm_vaddr_t *)mmio_gpa);
+       write_guest_global(vm, ucall_exit_mmio_addr, (vm_vaddr_t *)mmio_gva);
 }
 
 void ucall_arch_do_ucall(vm_vaddr_t uc)
index c88c3ace16d2fcc09a26fc1f51155010fba05460..56d5ea949cbbe9c272976b75e166d02ecc5db088 100644 (file)
@@ -186,6 +186,15 @@ const struct vm_guest_mode_params vm_guest_mode_params[] = {
 _Static_assert(sizeof(vm_guest_mode_params)/sizeof(struct vm_guest_mode_params) == NUM_VM_MODES,
               "Missing new mode params?");
 
+/*
+ * Initializes vm->vpages_valid to match the canonical VA space of the
+ * architecture.
+ *
+ * The default implementation is valid for architectures which split the
+ * range addressed by a single page table into a low and high region
+ * based on the MSB of the VA. On architectures with this behavior
+ * the VA region spans [0, 2^(va_bits - 1)), [-(2^(va_bits - 1), -1].
+ */
 __weak void vm_vaddr_populate_bitmap(struct kvm_vm *vm)
 {
        sparsebit_set_num(vm->vpages_valid,
@@ -1416,10 +1425,10 @@ void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
 
        while (npages--) {
                virt_pg_map(vm, vaddr, paddr);
+               sparsebit_set(vm->vpages_mapped, vaddr >> vm->page_shift);
+
                vaddr += page_size;
                paddr += page_size;
-
-               sparsebit_set(vm->vpages_mapped, vaddr >> vm->page_shift);
        }
 }
 
index 0cc0971ce60e3a58414c36c96ce1acbbe9c33601..2f0e2ea941cc66c2d27e74d0e734be274d060674 100644 (file)
@@ -4,6 +4,8 @@
 #include "linux/bitmap.h"
 #include "linux/atomic.h"
 
+#define GUEST_UCALL_FAILED -1
+
 struct ucall_header {
        DECLARE_BITMAP(in_use, KVM_MAX_VCPUS);
        struct ucall ucalls[KVM_MAX_VCPUS];
@@ -41,7 +43,8 @@ static struct ucall *ucall_alloc(void)
        struct ucall *uc;
        int i;
 
-       GUEST_ASSERT(ucall_pool);
+       if (!ucall_pool)
+               goto ucall_failed;
 
        for (i = 0; i < KVM_MAX_VCPUS; ++i) {
                if (!test_and_set_bit(i, ucall_pool->in_use)) {
@@ -51,7 +54,13 @@ static struct ucall *ucall_alloc(void)
                }
        }
 
-       GUEST_ASSERT(0);
+ucall_failed:
+       /*
+        * If the vCPU cannot grab a ucall structure, make a bare ucall with a
+        * magic value to signal to get_ucall() that things went sideways.
+        * GUEST_ASSERT() depends on ucall_alloc() and so cannot be used here.
+        */
+       ucall_arch_do_ucall(GUEST_UCALL_FAILED);
        return NULL;
 }
 
@@ -93,6 +102,9 @@ uint64_t get_ucall(struct kvm_vcpu *vcpu, struct ucall *uc)
 
        addr = ucall_arch_get_ucall(vcpu);
        if (addr) {
+               TEST_ASSERT(addr != (void *)GUEST_UCALL_FAILED,
+                           "Guest failed to allocate ucall struct");
+
                memcpy(uc, addr, sizeof(*uc));
                vcpu_run_complete_io(vcpu);
        } else {
index c4d368d56cfe2bae712e621a3536eb0445c9c3f9..acfa1d01e7df08ec06364a945ab23c85164b55cd 100644 (file)
@@ -1031,7 +1031,7 @@ bool is_amd_cpu(void)
 void kvm_get_cpu_address_width(unsigned int *pa_bits, unsigned int *va_bits)
 {
        if (!kvm_cpu_has_p(X86_PROPERTY_MAX_PHY_ADDR)) {
-               *pa_bits == kvm_cpu_has(X86_FEATURE_PAE) ? 36 : 32;
+               *pa_bits = kvm_cpu_has(X86_FEATURE_PAE) ? 36 : 32;
                *va_bits = 32;
        } else {
                *pa_bits = kvm_cpu_property(X86_PROPERTY_MAX_PHY_ADDR);
index e698306bf49d1d7745d0337f7f0350911c886add..e6587e19349051f9922b166b1b07e3dbf4024df5 100644 (file)
@@ -265,6 +265,9 @@ static uint64_t get_max_slots(struct vm_data *data, uint32_t host_page_size)
        slots = data->nslots;
        while (--slots > 1) {
                pages_per_slot = mempages / slots;
+               if (!pages_per_slot)
+                       continue;
+
                rempages = mempages % pages_per_slot;
                if (check_slot_pages(host_page_size, guest_page_size,
                                     pages_per_slot, rempages))
index 8b791eac7d5a37f90b450249800d0aeecbbc22c2..0cbb0e646ef8d7e7e804b730c773dcfd497a0658 100644 (file)
@@ -193,8 +193,9 @@ static void sender_guest_code(void *hcall_page, vm_vaddr_t pgs_gpa)
        GUEST_SYNC(stage++);
        /*
         * 'XMM Fast' HvCallSendSyntheticClusterIpiEx to HV_GENERIC_SET_ALL.
-        * Nothing to write anything to XMM regs.
         */
+       ipi_ex->vp_set.valid_bank_mask = 0;
+       hyperv_write_xmm_input(&ipi_ex->vp_set.valid_bank_mask, 2);
        hyperv_hypercall(HVCALL_SEND_IPI_EX | HV_HYPERCALL_FAST_BIT,
                         IPI_VECTOR, HV_GENERIC_SET_ALL);
        nop_loop();
index e497ace629c1924ca1b6c0664a565057b5ed5582..b34980d45648a68de61051cd521f49b21e8fa719 100644 (file)
@@ -41,8 +41,17 @@ static void guest_int_handler(struct ex_regs *regs)
 static void l2_guest_code_int(void)
 {
        GUEST_ASSERT_1(int_fired == 1, int_fired);
-       vmmcall();
-       ud2();
+
+       /*
+         * Same as the vmmcall() function, but with a ud2 sneaked after the
+         * vmmcall.  The caller injects an exception with the return address
+         * increased by 2, so the "pop rbp" must be after the ud2 and we cannot
+        * use vmmcall() directly.
+         */
+       __asm__ __volatile__("push %%rbp; vmmcall; ud2; pop %%rbp"
+                             : : "a"(0xdeadbeef), "c"(0xbeefdead)
+                             : "rbx", "rdx", "rsi", "rdi", "r8", "r9",
+                               "r10", "r11", "r12", "r13", "r14", "r15");
 
        GUEST_ASSERT_1(bp_fired == 1, bp_fired);
        hlt();
index 5943187e859494301a332749c2c321ed0ba00962..ff8ecdf32ae07d9e448e32af6839943a1ed3d51b 100644 (file)
@@ -49,11 +49,6 @@ enum {
        NUM_VMX_PAGES,
 };
 
-struct kvm_single_msr {
-       struct kvm_msrs header;
-       struct kvm_msr_entry entry;
-} __attribute__((packed));
-
 /* The virtual machine object. */
 static struct kvm_vm *vm;
 
index 721f6a693799b5f96eeb9bf16ec9c1386ea28bdc..dae510c263b45b6e14d81524cf42893edea2c49c 100644 (file)
@@ -962,6 +962,12 @@ int main(int argc, char *argv[])
        }
 
  done:
+       struct kvm_xen_hvm_attr evt_reset = {
+               .type = KVM_XEN_ATTR_TYPE_EVTCHN,
+               .u.evtchn.flags = KVM_XEN_EVTCHN_RESET,
+       };
+       vm_ioctl(vm, KVM_XEN_HVM_SET_ATTR, &evt_reset);
+
        alarm(0);
        clock_gettime(CLOCK_REALTIME, &max_ts);
 
index 291144c284fbcb4139549ea470402d84a206af23..f7900e75d2306d2ff79d930e78a1c9110c69b48b 100644 (file)
@@ -20,7 +20,7 @@ CLANG_TARGET_FLAGS              := $(CLANG_TARGET_FLAGS_$(ARCH))
 
 ifeq ($(CROSS_COMPILE),)
 ifeq ($(CLANG_TARGET_FLAGS),)
-$(error Specify CROSS_COMPILE or add '--target=' option to lib.mk
+$(error Specify CROSS_COMPILE or add '--target=' option to lib.mk)
 else
 CLANG_FLAGS     += --target=$(CLANG_TARGET_FLAGS)
 endif # CLANG_TARGET_FLAGS
index 9cc84114741d388f7b7e9c979856c69418a0ea32..a6911cae368c77b49f0083def57bbfdac75463b5 100644 (file)
@@ -1,5 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0-only
 bind_bhash
+bind_timewait
 csum
 cmsg_sender
 diag_uid
index b57e91e1c3f28fa1630bcd39c6b4254f8fda96c5..532459a15067cc5933caa8d7bd31a3c0bef406b4 100644 (file)
@@ -124,7 +124,7 @@ void producer(struct sockaddr_un *consumer_addr)
 
        wait_for_signal(pipefd[0]);
        if (connect(cfd, (struct sockaddr *)consumer_addr,
-                    sizeof(struct sockaddr)) != 0) {
+                    sizeof(*consumer_addr)) != 0) {
                perror("Connect failed");
                kill(0, SIGTERM);
                exit(1);
index b5af08af8559592df7b4cf4b1ffe8fb3893e14dc..4a110bb01e53e7c0155a7bf0e32a6817d9161517 100755 (executable)
@@ -18,14 +18,15 @@ readonly V4_ADDR1=10.0.10.2
 readonly V6_ADDR0=2001:db8:91::1
 readonly V6_ADDR1=2001:db8:91::2
 nsid=100
+ret=0
 
 cleanup_v6()
 {
     ip netns del me
     ip netns del peer
 
-    sysctl -w net.ipv4.conf.veth0.ndisc_evict_nocarrier=1 >/dev/null 2>&1
-    sysctl -w net.ipv4.conf.all.ndisc_evict_nocarrier=1 >/dev/null 2>&1
+    sysctl -w net.ipv6.conf.veth1.ndisc_evict_nocarrier=1 >/dev/null 2>&1
+    sysctl -w net.ipv6.conf.all.ndisc_evict_nocarrier=1 >/dev/null 2>&1
 }
 
 create_ns()
@@ -61,7 +62,7 @@ setup_v6() {
     if [ $? -ne 0 ]; then
         cleanup_v6
         echo "failed"
-        exit
+        exit 1
     fi
 
     # Set veth2 down, which will put veth1 in NOCARRIER state
@@ -88,7 +89,7 @@ setup_v4() {
     if [ $? -ne 0 ]; then
         cleanup_v4
         echo "failed"
-        exit
+        exit 1
     fi
 
     # Set veth1 down, which will put veth0 in NOCARRIER state
@@ -115,6 +116,7 @@ run_arp_evict_nocarrier_enabled() {
 
     if [ $? -eq 0 ];then
         echo "failed"
+        ret=1
     else
         echo "ok"
     fi
@@ -134,6 +136,7 @@ run_arp_evict_nocarrier_disabled() {
         echo "ok"
     else
         echo "failed"
+        ret=1
     fi
 
     cleanup_v4
@@ -164,6 +167,7 @@ run_ndisc_evict_nocarrier_enabled() {
 
     if [ $? -eq 0 ];then
         echo "failed"
+        ret=1
     else
         echo "ok"
     fi
@@ -182,6 +186,7 @@ run_ndisc_evict_nocarrier_disabled() {
         echo "ok"
     else
         echo "failed"
+        ret=1
     fi
 
     cleanup_v6
@@ -198,6 +203,7 @@ run_ndisc_evict_nocarrier_disabled_all() {
         echo "ok"
     else
         echo "failed"
+        ret=1
     fi
 
     cleanup_v6
@@ -218,3 +224,4 @@ if [ "$(id -u)" -ne 0 ];then
 fi
 
 run_all_tests
+exit $ret
diff --git a/tools/testing/selftests/net/bind_timewait.c b/tools/testing/selftests/net/bind_timewait.c
new file mode 100644 (file)
index 0000000..cb9fdf5
--- /dev/null
@@ -0,0 +1,92 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright Amazon.com Inc. or its affiliates. */
+
+#include <sys/socket.h>
+#include <netinet/in.h>
+
+#include "../kselftest_harness.h"
+
+FIXTURE(bind_timewait)
+{
+       struct sockaddr_in addr;
+       socklen_t addrlen;
+};
+
+FIXTURE_VARIANT(bind_timewait)
+{
+       __u32 addr_const;
+};
+
+FIXTURE_VARIANT_ADD(bind_timewait, localhost)
+{
+       .addr_const = INADDR_LOOPBACK
+};
+
+FIXTURE_VARIANT_ADD(bind_timewait, addrany)
+{
+       .addr_const = INADDR_ANY
+};
+
+FIXTURE_SETUP(bind_timewait)
+{
+       self->addr.sin_family = AF_INET;
+       self->addr.sin_port = 0;
+       self->addr.sin_addr.s_addr = htonl(variant->addr_const);
+       self->addrlen = sizeof(self->addr);
+}
+
+FIXTURE_TEARDOWN(bind_timewait)
+{
+}
+
+void create_timewait_socket(struct __test_metadata *_metadata,
+                           FIXTURE_DATA(bind_timewait) *self)
+{
+       int server_fd, client_fd, child_fd, ret;
+       struct sockaddr_in addr;
+       socklen_t addrlen;
+
+       server_fd = socket(AF_INET, SOCK_STREAM, 0);
+       ASSERT_GT(server_fd, 0);
+
+       ret = bind(server_fd, (struct sockaddr *)&self->addr, self->addrlen);
+       ASSERT_EQ(ret, 0);
+
+       ret = listen(server_fd, 1);
+       ASSERT_EQ(ret, 0);
+
+       ret = getsockname(server_fd, (struct sockaddr *)&self->addr, &self->addrlen);
+       ASSERT_EQ(ret, 0);
+
+       client_fd = socket(AF_INET, SOCK_STREAM, 0);
+       ASSERT_GT(client_fd, 0);
+
+       ret = connect(client_fd, (struct sockaddr *)&self->addr, self->addrlen);
+       ASSERT_EQ(ret, 0);
+
+       addrlen = sizeof(addr);
+       child_fd = accept(server_fd, (struct sockaddr *)&addr, &addrlen);
+       ASSERT_GT(child_fd, 0);
+
+       close(child_fd);
+       close(client_fd);
+       close(server_fd);
+}
+
+TEST_F(bind_timewait, 1)
+{
+       int fd, ret;
+
+       create_timewait_socket(_metadata, self);
+
+       fd = socket(AF_INET, SOCK_STREAM, 0);
+       ASSERT_GT(fd, 0);
+
+       ret = bind(fd, (struct sockaddr *)&self->addr, self->addrlen);
+       ASSERT_EQ(ret, -1);
+       ASSERT_EQ(errno, EADDRINUSE);
+
+       close(fd);
+}
+
+TEST_HARNESS_MAIN
index 75dd83e39207bd38d6943c4eff5d155c005e12d3..24b21b15ed3fb05ba677328c4d68a258d9819169 100644 (file)
@@ -110,7 +110,7 @@ static void __attribute__((noreturn)) cs_usage(const char *bin)
 
 static void cs_parse_args(int argc, char *argv[])
 {
-       char o;
+       int o;
 
        while ((o = getopt(argc, argv, "46sS:p:m:M:d:tf:F:c:C:l:L:H:")) != -1) {
                switch (o) {
index dca1e6f777a8979d0b0fedf5e7f2fb05c9515fc6..f11756e7df2f91bdc631bc7afd13b0fc45ce30e9 100755 (executable)
 # In addition this script also checks if forcing a specific field in the
 # outer header is working.
 
+# Return 4 by default (Kselftest SKIP code)
+ERR=4
+
 if [ "$(id -u)" != "0" ]; then
        echo "Please run as root."
-       exit 0
+       exit $ERR
 fi
 if ! which tcpdump > /dev/null 2>&1; then
        echo "No tcpdump found. Required for this test."
-       exit 0
+       exit $ERR
 fi
 
 expected_tos="0x00"
 expected_ttl="0"
 failed=false
 
+readonly NS0=$(mktemp -u ns0-XXXXXXXX)
+readonly NS1=$(mktemp -u ns1-XXXXXXXX)
+
+RUN_NS0="ip netns exec ${NS0}"
+
 get_random_tos() {
        # Get a random hex tos value between 0x00 and 0xfc, a multiple of 4
        echo "0x$(tr -dc '0-9a-f' < /dev/urandom | head -c 1)\
@@ -61,7 +69,6 @@ setup() {
        local vlan="$5"
        local test_tos="0x00"
        local test_ttl="0"
-       local ns="ip netns exec testing"
 
        # We don't want a test-tos of 0x00,
        # because this is the value that we get when no tos is set.
@@ -94,14 +101,15 @@ setup() {
        printf "│%7s │%6s │%6s │%13s │%13s │%6s │" \
        "$type" "$outer" "$inner" "$tos" "$ttl" "$vlan"
 
-       # Create 'testing' netns, veth pair and connect main ns with testing ns
-       ip netns add testing
-       ip link add type veth
-       ip link set veth1 netns testing
-       ip link set veth0 up
-       $ns ip link set veth1 up
-       ip addr flush dev veth0
-       $ns ip addr flush dev veth1
+       # Create netns NS0 and NS1 and connect them with a veth pair
+       ip netns add "${NS0}"
+       ip netns add "${NS1}"
+       ip link add name veth0 netns "${NS0}" type veth \
+               peer name veth1 netns "${NS1}"
+       ip -netns "${NS0}" link set dev veth0 up
+       ip -netns "${NS1}" link set dev veth1 up
+       ip -netns "${NS0}" address flush dev veth0
+       ip -netns "${NS1}" address flush dev veth1
 
        local local_addr1=""
        local local_addr2=""
@@ -127,51 +135,59 @@ setup() {
                if [ "$type" = "gre" ]; then
                        type="gretap"
                fi
-               ip addr add 198.18.0.1/24 dev veth0
-               $ns ip addr add 198.18.0.2/24 dev veth1
-               ip link add name tep0 type $type $local_addr1 remote \
-               198.18.0.2 tos $test_tos ttl $test_ttl $vxlan $geneve
-               $ns ip link add name tep1 type $type $local_addr2 remote \
-               198.18.0.1 tos $test_tos ttl $test_ttl $vxlan $geneve
+               ip -netns "${NS0}" address add 198.18.0.1/24 dev veth0
+               ip -netns "${NS1}" address add 198.18.0.2/24 dev veth1
+               ip -netns "${NS0}" link add name tep0 type $type $local_addr1 \
+                       remote 198.18.0.2 tos $test_tos ttl $test_ttl         \
+                       $vxlan $geneve
+               ip -netns "${NS1}" link add name tep1 type $type $local_addr2 \
+                       remote 198.18.0.1 tos $test_tos ttl $test_ttl         \
+                       $vxlan $geneve
        elif [ "$outer" = "6" ]; then
                if [ "$type" = "gre" ]; then
                        type="ip6gretap"
                fi
-               ip addr add fdd1:ced0:5d88:3fce::1/64 dev veth0
-               $ns ip addr add fdd1:ced0:5d88:3fce::2/64 dev veth1
-               ip link add name tep0 type $type $local_addr1 \
-               remote fdd1:ced0:5d88:3fce::2 tos $test_tos ttl $test_ttl \
-               $vxlan $geneve
-               $ns ip link add name tep1 type $type $local_addr2 \
-               remote fdd1:ced0:5d88:3fce::1 tos $test_tos ttl $test_ttl \
-               $vxlan $geneve
+               ip -netns "${NS0}" address add fdd1:ced0:5d88:3fce::1/64 \
+                       dev veth0 nodad
+               ip -netns "${NS1}" address add fdd1:ced0:5d88:3fce::2/64 \
+                       dev veth1 nodad
+               ip -netns "${NS0}" link add name tep0 type $type $local_addr1 \
+                       remote fdd1:ced0:5d88:3fce::2 tos $test_tos           \
+                       ttl $test_ttl $vxlan $geneve
+               ip -netns "${NS1}" link add name tep1 type $type $local_addr2 \
+                       remote fdd1:ced0:5d88:3fce::1 tos $test_tos           \
+                       ttl $test_ttl $vxlan $geneve
        fi
 
        # Bring L2-tunnel link up and create VLAN on top
-       ip link set tep0 up
-       $ns ip link set tep1 up
-       ip addr flush dev tep0
-       $ns ip addr flush dev tep1
+       ip -netns "${NS0}" link set tep0 up
+       ip -netns "${NS1}" link set tep1 up
+       ip -netns "${NS0}" address flush dev tep0
+       ip -netns "${NS1}" address flush dev tep1
        local parent
        if $vlan; then
                parent="vlan99-"
-               ip link add link tep0 name ${parent}0 type vlan id 99
-               $ns ip link add link tep1 name ${parent}1 type vlan id 99
-               ip link set ${parent}0 up
-               $ns ip link set ${parent}1 up
-               ip addr flush dev ${parent}0
-               $ns ip addr flush dev ${parent}1
+               ip -netns "${NS0}" link add link tep0 name ${parent}0 \
+                       type vlan id 99
+               ip -netns "${NS1}" link add link tep1 name ${parent}1 \
+                       type vlan id 99
+               ip -netns "${NS0}" link set dev ${parent}0 up
+               ip -netns "${NS1}" link set dev ${parent}1 up
+               ip -netns "${NS0}" address flush dev ${parent}0
+               ip -netns "${NS1}" address flush dev ${parent}1
        else
                parent="tep"
        fi
 
        # Assign inner IPv4/IPv6 addresses
        if [ "$inner" = "4" ] || [ "$inner" = "other" ]; then
-               ip addr add 198.19.0.1/24 brd + dev ${parent}0
-               $ns ip addr add 198.19.0.2/24 brd + dev ${parent}1
+               ip -netns "${NS0}" address add 198.19.0.1/24 brd + dev ${parent}0
+               ip -netns "${NS1}" address add 198.19.0.2/24 brd + dev ${parent}1
        elif [ "$inner" = "6" ]; then
-               ip addr add fdd4:96cf:4eae:443b::1/64 dev ${parent}0
-               $ns ip addr add fdd4:96cf:4eae:443b::2/64 dev ${parent}1
+               ip -netns "${NS0}" address add fdd4:96cf:4eae:443b::1/64 \
+                       dev ${parent}0 nodad
+               ip -netns "${NS1}" address add fdd4:96cf:4eae:443b::2/64 \
+                       dev ${parent}1 nodad
        fi
 }
 
@@ -192,10 +208,10 @@ verify() {
                ping_dst="198.19.0.3" # Generates ARPs which are not IPv4/IPv6
        fi
        if [ "$tos_ttl" = "inherit" ]; then
-               ping -i 0.1 $ping_dst -Q "$expected_tos" -t "$expected_ttl" \
-               2>/dev/null 1>&2 & ping_pid="$!"
+               ${RUN_NS0} ping -i 0.1 $ping_dst -Q "$expected_tos"          \
+                        -t "$expected_ttl" 2>/dev/null 1>&2 & ping_pid="$!"
        else
-               ping -i 0.1 $ping_dst 2>/dev/null 1>&2 & ping_pid="$!"
+               ${RUN_NS0} ping -i 0.1 $ping_dst 2>/dev/null 1>&2 & ping_pid="$!"
        fi
        local tunnel_type_offset tunnel_type_proto req_proto_offset req_offset
        if [ "$type" = "gre" ]; then
@@ -216,10 +232,12 @@ verify() {
                                req_proto_offset="$((req_proto_offset + 4))"
                                req_offset="$((req_offset + 4))"
                        fi
-                       out="$(tcpdump --immediate-mode -p -c 1 -v -i veth0 -n \
-                       ip[$tunnel_type_offset] = $tunnel_type_proto and \
-                       ip[$req_proto_offset] = 0x01 and \
-                       ip[$req_offset] = 0x08 2>/dev/null | head -n 1)"
+                       out="$(${RUN_NS0} tcpdump --immediate-mode -p -c 1 -v \
+                               -i veth0 -n                                   \
+                               ip[$tunnel_type_offset] = $tunnel_type_proto and \
+                               ip[$req_proto_offset] = 0x01 and              \
+                               ip[$req_offset] = 0x08 2>/dev/null            \
+                               | head -n 1)"
                elif [ "$inner" = "6" ]; then
                        req_proto_offset="44"
                        req_offset="78"
@@ -231,10 +249,12 @@ verify() {
                                req_proto_offset="$((req_proto_offset + 4))"
                                req_offset="$((req_offset + 4))"
                        fi
-                       out="$(tcpdump --immediate-mode -p -c 1 -v -i veth0 -n \
-                       ip[$tunnel_type_offset] = $tunnel_type_proto and \
-                       ip[$req_proto_offset] = 0x3a and \
-                       ip[$req_offset] = 0x80 2>/dev/null | head -n 1)"
+                       out="$(${RUN_NS0} tcpdump --immediate-mode -p -c 1 -v \
+                               -i veth0 -n                                   \
+                               ip[$tunnel_type_offset] = $tunnel_type_proto and \
+                               ip[$req_proto_offset] = 0x3a and              \
+                               ip[$req_offset] = 0x80 2>/dev/null            \
+                               | head -n 1)"
                elif [ "$inner" = "other" ]; then
                        req_proto_offset="36"
                        req_offset="45"
@@ -250,11 +270,13 @@ verify() {
                                expected_tos="0x00"
                                expected_ttl="64"
                        fi
-                       out="$(tcpdump --immediate-mode -p -c 1 -v -i veth0 -n \
-                       ip[$tunnel_type_offset] = $tunnel_type_proto and \
-                       ip[$req_proto_offset] = 0x08 and \
-                       ip[$((req_proto_offset + 1))] = 0x06 and \
-                       ip[$req_offset] = 0x01 2>/dev/null | head -n 1)"
+                       out="$(${RUN_NS0} tcpdump --immediate-mode -p -c 1 -v \
+                               -i veth0 -n                                   \
+                               ip[$tunnel_type_offset] = $tunnel_type_proto and \
+                               ip[$req_proto_offset] = 0x08 and              \
+                               ip[$((req_proto_offset + 1))] = 0x06 and      \
+                               ip[$req_offset] = 0x01 2>/dev/null            \
+                               | head -n 1)"
                fi
        elif [ "$outer" = "6" ]; then
                if [ "$type" = "gre" ]; then
@@ -273,10 +295,12 @@ verify() {
                                req_proto_offset="$((req_proto_offset + 4))"
                                req_offset="$((req_offset + 4))"
                        fi
-                       out="$(tcpdump --immediate-mode -p -c 1 -v -i veth0 -n \
-                       ip6[$tunnel_type_offset] = $tunnel_type_proto and \
-                       ip6[$req_proto_offset] = 0x01 and \
-                       ip6[$req_offset] = 0x08 2>/dev/null | head -n 1)"
+                       out="$(${RUN_NS0} tcpdump --immediate-mode -p -c 1 -v \
+                               -i veth0 -n                                   \
+                               ip6[$tunnel_type_offset] = $tunnel_type_proto and \
+                               ip6[$req_proto_offset] = 0x01 and             \
+                               ip6[$req_offset] = 0x08 2>/dev/null           \
+                               | head -n 1)"
                elif [ "$inner" = "6" ]; then
                        local req_proto_offset="72"
                        local req_offset="106"
@@ -288,10 +312,12 @@ verify() {
                                req_proto_offset="$((req_proto_offset + 4))"
                                req_offset="$((req_offset + 4))"
                        fi
-                       out="$(tcpdump --immediate-mode -p -c 1 -v -i veth0 -n \
-                       ip6[$tunnel_type_offset] = $tunnel_type_proto and \
-                       ip6[$req_proto_offset] = 0x3a and \
-                       ip6[$req_offset] = 0x80 2>/dev/null | head -n 1)"
+                       out="$(${RUN_NS0} tcpdump --immediate-mode -p -c 1 -v \
+                               -i veth0 -n                                   \
+                               ip6[$tunnel_type_offset] = $tunnel_type_proto and \
+                               ip6[$req_proto_offset] = 0x3a and             \
+                               ip6[$req_offset] = 0x80 2>/dev/null           \
+                               | head -n 1)"
                elif [ "$inner" = "other" ]; then
                        local req_proto_offset="64"
                        local req_offset="73"
@@ -307,15 +333,17 @@ verify() {
                                expected_tos="0x00"
                                expected_ttl="64"
                        fi
-                       out="$(tcpdump --immediate-mode -p -c 1 -v -i veth0 -n \
-                       ip6[$tunnel_type_offset] = $tunnel_type_proto and \
-                       ip6[$req_proto_offset] = 0x08 and \
-                       ip6[$((req_proto_offset + 1))] = 0x06 and \
-                       ip6[$req_offset] = 0x01 2>/dev/null | head -n 1)"
+                       out="$(${RUN_NS0} tcpdump --immediate-mode -p -c 1 -v \
+                               -i veth0 -n                                   \
+                               ip6[$tunnel_type_offset] = $tunnel_type_proto and \
+                               ip6[$req_proto_offset] = 0x08 and             \
+                               ip6[$((req_proto_offset + 1))] = 0x06 and     \
+                               ip6[$req_offset] = 0x01 2>/dev/null           \
+                               | head -n 1)"
                fi
        fi
        kill -9 $ping_pid
-       wait $ping_pid 2>/dev/null
+       wait $ping_pid 2>/dev/null || true
        result="FAIL"
        if [ "$outer" = "4" ]; then
                captured_ttl="$(get_field "ttl" "$out")"
@@ -351,11 +379,35 @@ verify() {
 }
 
 cleanup() {
-       ip link del veth0 2>/dev/null
-       ip netns del testing 2>/dev/null
-       ip link del tep0 2>/dev/null
+       ip netns del "${NS0}" 2>/dev/null
+       ip netns del "${NS1}" 2>/dev/null
 }
 
+exit_handler() {
+       # Don't exit immediately if one of the intermediate commands fails.
+       # We might be called at the end of the script, when the network
+       # namespaces have already been deleted. So cleanup() may fail, but we
+       # still need to run until 'exit $ERR' or the script won't return the
+       # correct error code.
+       set +e
+
+       cleanup
+
+       exit $ERR
+}
+
+# Restore the default SIGINT handler (just in case) and exit.
+# The exit handler will take care of cleaning everything up.
+interrupted() {
+       trap - INT
+
+       exit $ERR
+}
+
+set -e
+trap exit_handler EXIT
+trap interrupted INT
+
 printf "┌────────┬───────┬───────┬──────────────┬"
 printf "──────────────┬───────┬────────┐\n"
 for type in gre vxlan geneve; do
@@ -385,6 +437,10 @@ done
 printf "└────────┴───────┴───────┴──────────────┴"
 printf "──────────────┴───────┴────────┘\n"
 
+# All tests done.
+# Set ERR appropriately: it will be returned by the exit handler.
 if $failed; then
-       exit 1
+       ERR=1
+else
+       ERR=0
 fi
index a29deb9fa024cd5804e5be81f6b1cfcaa821c00d..ab2d581f28a1d1663c59ab19822d8c86650b027d 100755 (executable)
@@ -752,6 +752,52 @@ test_subflows()
           "$server4_token" > /dev/null 2>&1
 }
 
+test_subflows_v4_v6_mix()
+{
+       # Attempt to add a listener at 10.0.2.1:<subflow-port>
+       ip netns exec "$ns1" ./pm_nl_ctl listen 10.0.2.1\
+          $app6_port > /dev/null 2>&1 &
+       local listener_pid=$!
+
+       # ADD_ADDR4 from server to client machine reusing the subflow port on
+       # the established v6 connection
+       :>"$client_evts"
+       ip netns exec "$ns1" ./pm_nl_ctl ann 10.0.2.1 token "$server6_token" id\
+          $server_addr_id dev ns1eth2 > /dev/null 2>&1
+       stdbuf -o0 -e0 printf "ADD_ADDR4 id:%d 10.0.2.1 (ns1) => ns2, reuse port\t\t" $server_addr_id
+       sleep 0.5
+       verify_announce_event "$client_evts" "$ANNOUNCED" "$client6_token" "10.0.2.1"\
+                             "$server_addr_id" "$app6_port"
+
+       # CREATE_SUBFLOW from client to server machine
+       :>"$client_evts"
+       ip netns exec "$ns2" ./pm_nl_ctl csf lip 10.0.2.2 lid 23 rip 10.0.2.1 rport\
+          $app6_port token "$client6_token" > /dev/null 2>&1
+       sleep 0.5
+       verify_subflow_events "$client_evts" "$SUB_ESTABLISHED" "$client6_token"\
+                             "$AF_INET" "10.0.2.2" "10.0.2.1" "$app6_port" "23"\
+                             "$server_addr_id" "ns2" "ns1"
+
+       # Delete the listener from the server ns, if one was created
+       kill_wait $listener_pid
+
+       sport=$(sed --unbuffered -n 's/.*\(sport:\)\([[:digit:]]*\).*$/\2/p;q' "$client_evts")
+
+       # DESTROY_SUBFLOW from client to server machine
+       :>"$client_evts"
+       ip netns exec "$ns2" ./pm_nl_ctl dsf lip 10.0.2.2 lport "$sport" rip 10.0.2.1 rport\
+          $app6_port token "$client6_token" > /dev/null 2>&1
+       sleep 0.5
+       verify_subflow_events "$client_evts" "$SUB_CLOSED" "$client6_token" \
+                             "$AF_INET" "10.0.2.2" "10.0.2.1" "$app6_port" "23"\
+                             "$server_addr_id" "ns2" "ns1"
+
+       # RM_ADDR from server to client machine
+       ip netns exec "$ns1" ./pm_nl_ctl rem id $server_addr_id token\
+          "$server6_token" > /dev/null 2>&1
+       sleep 0.5
+}
+
 test_prio()
 {
        local count
@@ -861,6 +907,7 @@ make_connection "v6"
 test_announce
 test_remove
 test_subflows
+test_subflows_v4_v6_mix
 test_prio
 test_listener
 
index 90026a27eac0cb678bcb3ed15e6855e2cb0aa885..9ba03164d73a692f6ea39c560d43e678659ce21f 100644 (file)
@@ -215,7 +215,7 @@ static char *recv_frame(const struct ring_state *ring, char *frame)
 }
 
 /* A single TPACKET_V3 block can hold multiple frames */
-static void recv_block(struct ring_state *ring)
+static bool recv_block(struct ring_state *ring)
 {
        struct tpacket_block_desc *block;
        char *frame;
@@ -223,7 +223,7 @@ static void recv_block(struct ring_state *ring)
 
        block = (void *)(ring->mmap + ring->idx * ring_block_sz);
        if (!(block->hdr.bh1.block_status & TP_STATUS_USER))
-               return;
+               return false;
 
        frame = (char *)block;
        frame += block->hdr.bh1.offset_to_first_pkt;
@@ -235,6 +235,8 @@ static void recv_block(struct ring_state *ring)
 
        block->hdr.bh1.block_status = TP_STATUS_KERNEL;
        ring->idx = (ring->idx + 1) % ring_block_nr;
+
+       return true;
 }
 
 /* simple test: sleep once unconditionally and then process all rings */
@@ -245,7 +247,7 @@ static void process_rings(void)
        usleep(1000 * cfg_timeout_msec);
 
        for (i = 0; i < num_cpus; i++)
-               recv_block(&rings[i]);
+               do {} while (recv_block(&rings[i]));
 
        fprintf(stderr, "count: pass=%u nohash=%u fail=%u\n",
                frames_received - frames_nohash - frames_error,
@@ -257,12 +259,12 @@ static char *setup_ring(int fd)
        struct tpacket_req3 req3 = {0};
        void *ring;
 
-       req3.tp_retire_blk_tov = cfg_timeout_msec;
+       req3.tp_retire_blk_tov = cfg_timeout_msec / 8;
        req3.tp_feature_req_word = TP_FT_REQ_FILL_RXHASH;
 
        req3.tp_frame_size = 2048;
        req3.tp_frame_nr = 1 << 10;
-       req3.tp_block_nr = 2;
+       req3.tp_block_nr = 16;
 
        req3.tp_block_size = req3.tp_frame_size * req3.tp_frame_nr;
        req3.tp_block_size /= req3.tp_block_nr;
index a7f62ad4f6611d1cd987b84369c1da3f213d338a..2ffba45a78bf450bd3f969476607d2866a2dcb85 100755 (executable)
 ksft_skip=4
 
 testns=testns-$(mktemp -u "XXXXXXXX")
+tmp=""
 
 tables="foo bar baz quux"
 global_ret=0
 eret=0
 lret=0
 
+cleanup() {
+       ip netns pids "$testns" | xargs kill 2>/dev/null
+       ip netns del "$testns"
+
+       rm -f "$tmp"
+}
+
 check_result()
 {
        local r=$1
@@ -43,6 +51,7 @@ if [ $? -ne 0 ];then
        exit $ksft_skip
 fi
 
+trap cleanup EXIT
 tmp=$(mktemp)
 
 for table in $tables; do
@@ -139,11 +148,4 @@ done
 
 check_result $lret "add/delete with nftrace enabled"
 
-pkill -9 ping
-
-wait
-
-rm -f "$tmp"
-ip netns del "$testns"
-
 exit $global_ret
diff --git a/tools/testing/selftests/netfilter/settings b/tools/testing/selftests/netfilter/settings
new file mode 100644 (file)
index 0000000..6091b45
--- /dev/null
@@ -0,0 +1 @@
+timeout=120
index d95b1cb43d9d08cb6c41eaef267b992082705910..7588428b8fcd7de0791a3fc66e45503a2bc95247 100644 (file)
@@ -25,6 +25,7 @@
 #undef NDEBUG
 #include <assert.h>
 #include <errno.h>
+#include <stdint.h>
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
@@ -41,7 +42,7 @@
  * 1: vsyscall VMA is --xp             vsyscall=xonly
  * 2: vsyscall VMA is r-xp             vsyscall=emulate
  */
-static int g_vsyscall;
+static volatile int g_vsyscall;
 static const char *g_proc_pid_maps_vsyscall;
 static const char *g_proc_pid_smaps_vsyscall;
 
@@ -147,11 +148,12 @@ static void vsyscall(void)
 
                g_vsyscall = 0;
                /* gettimeofday(NULL, NULL); */
+               uint64_t rax = 0xffffffffff600000;
                asm volatile (
-                       "call %P0"
-                       :
-                       : "i" (0xffffffffff600000), "D" (NULL), "S" (NULL)
-                       : "rax", "rcx", "r11"
+                       "call *%[rax]"
+                       : [rax] "+a" (rax)
+                       : "D" (NULL), "S" (NULL)
+                       : "rcx", "r11"
                );
 
                g_vsyscall = 1;
index 69551bfa215c4e8321a1971f405e602a67940148..cacbd2a4aec9120b06285a47688cf4b4decc6a2d 100644 (file)
@@ -257,11 +257,12 @@ static void vsyscall(void)
 
                g_vsyscall = 0;
                /* gettimeofday(NULL, NULL); */
+               uint64_t rax = 0xffffffffff600000;
                asm volatile (
-                       "call %P0"
-                       :
-                       : "i" (0xffffffffff600000), "D" (NULL), "S" (NULL)
-                       : "rax", "rcx", "r11"
+                       "call *%[rax]"
+                       : [rax] "+a" (rax)
+                       : "D" (NULL), "S" (NULL)
+                       : "rcx", "r11"
                );
 
                g_vsyscall = 1;
index 6d1fccd3d86ced87840be5e23d77ff276b73aa9b..b68920d527503cfbd59fb33f8891501c65040da7 100644 (file)
@@ -140,25 +140,19 @@ static inline void busy_wait(void)
 #define smp_wmb() smp_release()
 #endif
 
-#ifdef __alpha__
-#define smp_read_barrier_depends() smp_acquire()
-#else
-#define smp_read_barrier_depends() do {} while(0)
-#endif
-
 static __always_inline
 void __read_once_size(const volatile void *p, void *res, int size)
 {
-        switch (size) {                                                 \
-        case 1: *(unsigned char *)res = *(volatile unsigned char *)p; break;              \
-        case 2: *(unsigned short *)res = *(volatile unsigned short *)p; break;            \
-        case 4: *(unsigned int *)res = *(volatile unsigned int *)p; break;            \
-        case 8: *(unsigned long long *)res = *(volatile unsigned long long *)p; break;            \
-        default:                                                        \
-                barrier();                                              \
-                __builtin_memcpy((void *)res, (const void *)p, size);   \
-                barrier();                                              \
-        }                                                               \
+       switch (size) {
+       case 1: *(unsigned char *)res = *(volatile unsigned char *)p; break;
+       case 2: *(unsigned short *)res = *(volatile unsigned short *)p; break;
+       case 4: *(unsigned int *)res = *(volatile unsigned int *)p; break;
+       case 8: *(unsigned long long *)res = *(volatile unsigned long long *)p; break;
+       default:
+               barrier();
+               __builtin_memcpy((void *)res, (const void *)p, size);
+               barrier();
+       }
 }
 
 static __always_inline void __write_once_size(volatile void *p, void *res, int size)
@@ -175,13 +169,22 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
        }
 }
 
+#ifdef __alpha__
 #define READ_ONCE(x) \
 ({                                                                     \
        union { typeof(x) __val; char __c[1]; } __u;                    \
        __read_once_size(&(x), __u.__c, sizeof(x));             \
-       smp_read_barrier_depends(); /* Enforce dependency ordering from x */ \
+       smp_mb(); /* Enforce dependency ordering from x */              \
+       __u.__val;                                                      \
+})
+#else
+#define READ_ONCE(x)                                                   \
+({                                                                     \
+       union { typeof(x) __val; char __c[1]; } __u;                    \
+       __read_once_size(&(x), __u.__c, sizeof(x));                     \
        __u.__val;                                                      \
 })
+#endif
 
 #define WRITE_ONCE(x, val) \
 ({                                                     \
index 73d253d4b559a7274dc20c35c801f3a9ac669054..39860be6e2d86fc062571e2179596c9dd20a470c 100644 (file)
@@ -75,7 +75,7 @@ static int wait_order(int ctl_fd)
 
                if (ret)
                        break;
-       };
+       }
 
        return ret;
 
index 86a410ddceddec107f3dd0cd294bb2785c8deba5..120062f94590c6b47a245e9df5c9c480d0a487c2 100644 (file)
@@ -173,7 +173,7 @@ static void run_test(struct vdev_info *dev, struct vq_info *vq,
        long started = 0, completed = 0, next_reset = reset_n;
        long completed_before, started_before;
        int r, test = 1;
-       unsigned len;
+       unsigned int len;
        long long spurious = 0;
        const bool random_batch = batch == RANDOM_BATCH;
 
index fa87b58bd5fa5f7c0cb42dc84e826822a7abb23f..98ff808d6f0c2d170d844f8a395cb28075b7d0eb 100644 (file)
@@ -308,6 +308,7 @@ static int parallel_test(u64 features,
 
                gvdev.vdev.features = features;
                INIT_LIST_HEAD(&gvdev.vdev.vqs);
+               spin_lock_init(&gvdev.vdev.vqs_list_lock);
                gvdev.to_host_fd = to_host[1];
                gvdev.notifies = 0;
 
@@ -455,6 +456,7 @@ int main(int argc, char *argv[])
        getrange = getrange_iov;
        vdev.features = 0;
        INIT_LIST_HEAD(&vdev.vqs);
+       spin_lock_init(&vdev.vqs_list_lock);
 
        while (argv[1]) {
                if (strcmp(argv[1], "--indirect") == 0)
index 13e88297f999631d1322db5bbc04b51159744578..9c60384b5ae0bacd9bbe1bb417bb5ff8afce2229 100644 (file)
@@ -3954,6 +3954,13 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
        }
 
        mutex_lock(&kvm->lock);
+
+#ifdef CONFIG_LOCKDEP
+       /* Ensure that lockdep knows vcpu->mutex is taken *inside* kvm->lock */
+       mutex_lock(&vcpu->mutex);
+       mutex_unlock(&vcpu->mutex);
+#endif
+
        if (kvm_get_vcpu_by_id(kvm, id)) {
                r = -EEXIST;
                goto unlock_vcpu_destroy;
index a1ab15006af34e81956590e2f3ddb6c7891c5711..180f1a09e6ba7bc8b25275e6f938a1484c6e5f30 100644 (file)
 #define KVM_MMU_LOCK_INIT(kvm)         rwlock_init(&(kvm)->mmu_lock)
 #define KVM_MMU_LOCK(kvm)              write_lock(&(kvm)->mmu_lock)
 #define KVM_MMU_UNLOCK(kvm)            write_unlock(&(kvm)->mmu_lock)
-#define KVM_MMU_READ_LOCK(kvm)         read_lock(&(kvm)->mmu_lock)
-#define KVM_MMU_READ_UNLOCK(kvm)       read_unlock(&(kvm)->mmu_lock)
 #else
 #define KVM_MMU_LOCK_INIT(kvm)         spin_lock_init(&(kvm)->mmu_lock)
 #define KVM_MMU_LOCK(kvm)              spin_lock(&(kvm)->mmu_lock)
 #define KVM_MMU_UNLOCK(kvm)            spin_unlock(&(kvm)->mmu_lock)
-#define KVM_MMU_READ_LOCK(kvm)         spin_lock(&(kvm)->mmu_lock)
-#define KVM_MMU_READ_UNLOCK(kvm)       spin_unlock(&(kvm)->mmu_lock)
 #endif /* KVM_HAVE_MMU_RWLOCK */
 
 kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool interruptible,