Merge tag 'clk-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git...
author Linus Torvalds <torvalds@linux-foundation.org>
Fri, 8 Nov 2019 16:15:01 +0000 (08:15 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Fri, 8 Nov 2019 16:15:01 +0000 (08:15 -0800)
Pull clk fixes from Stephen Boyd:
 "Fixes for various clk driver issues that happened because of code we
  merged this merge window.

  The Amlogic driver was missing some flags causing rates to be rounded
  improperly or clk_set_rate() to fail. The Samsung driver wasn't
  freeing everything on error paths and was improperly saving/restoring
  PLL state across suspend/resume. The at91 driver was calling msleep() too
  early when scheduling hadn't started, so we put in place a quick
  solution until we can handle this sort of problem in the core
  framework.

  There were also problems in the Allwinner driver, where incorrect
  operator precedence caused subtle bugs. Finally, the TI driver was
  duplicating aliases and not delaying long enough, leading to some
  unexpected timeouts"
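
The operator-precedence issue called out above is a classic C pitfall:
bitwise OR binds more tightly than the ternary operator, so a flag OR'ed
onto the false branch of a conditional only applies to that branch. The
snippet below is an illustrative sketch with made-up flag names, not the
actual sunxi_divs_clk_setup code:

    /* Illustrative only -- hypothetical flag names, not the sunxi code. */
    #define CLK_FLAG_CRITICAL   0x1
    #define CLK_FLAG_SET_PARENT 0x2

    static unsigned long buggy_flags(int critical)
    {
            /*
             * Intended: (critical ? CLK_FLAG_CRITICAL : 0) | CLK_FLAG_SET_PARENT
             * Parsed:    critical ? CLK_FLAG_CRITICAL : (0 | CLK_FLAG_SET_PARENT)
             * so CLK_FLAG_SET_PARENT is silently dropped whenever critical != 0.
             */
            return critical ? CLK_FLAG_CRITICAL : 0 | CLK_FLAG_SET_PARENT;
    }

    static unsigned long fixed_flags(int critical)
    {
            /* Parenthesize the conditional so the extra flag is always set. */
            return (critical ? CLK_FLAG_CRITICAL : 0) | CLK_FLAG_SET_PARENT;
    }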

* tag 'clk-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/clk/linux:
  clk: ti: clkctrl: Fix failed to enable error with double udelay timeout
  clk: ti: dra7-atl-clock: Remove ti_clk_add_alias call
  clk: sunxi-ng: a80: fix the zero'ing of bits 16 and 18
  clk: sunxi: Fix operator precedence in sunxi_divs_clk_setup
  clk: ast2600: Fix enabling of clocks
  clk: at91: avoid sleeping early
  clk: imx8m: Use SYS_PLL1_800M as intermediate parent of CLK_ARM
  clk: samsung: exynos5420: Preserve PLL configuration during suspend/resume
  clk: samsung: exynos542x: Move G3D subsystem clocks to its sub-CMU
  clk: samsung: exynos5433: Fix error paths
  clk: at91: sam9x60: fix programmable clock
  clk: meson: g12a: set CLK_MUX_ROUND_CLOSEST on the cpu clock muxes
  clk: meson: g12a: fix cpu clock rate setting
  clk: meson: gxbb: let sar_adc_clk_div set the parent clock rate
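
For the "clk: at91: avoid sleeping early" entry, the underlying problem is
that msleep() depends on the scheduler, which is not yet running when the
clk framework does its early boot work. The quick solution described in the
pull message amounts to busy-waiting until scheduling is available; the
helper below is only a sketch of that pattern (hypothetical name and state
check), not the actual at91 change:

    /* Sketch only -- not the actual at91 fix. */
    #include <linux/delay.h>
    #include <linux/kernel.h>

    static void clk_hypothetical_wait_us(unsigned int us)
    {
            /*
             * msleep()/usleep_range() need a running scheduler; before
             * SYSTEM_SCHEDULING is reached, fall back to a udelay() busy
             * wait, which is acceptable for the short delays clk drivers use.
             */
            if (system_state < SYSTEM_SCHEDULING)
                    udelay(us);
            else
                    usleep_range(us, us + 10);
    }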

1536 files changed:
.mailmap
CREDITS
Documentation/admin-guide/cgroup-v2.rst
Documentation/admin-guide/kernel-parameters.txt
Documentation/arm64/memory.rst
Documentation/arm64/silicon-errata.rst
Documentation/core-api/index.rst
Documentation/core-api/memory-allocation.rst
Documentation/core-api/symbol-namespaces.rst [new file with mode: 0644]
Documentation/dev-tools/kasan.rst
Documentation/dev-tools/kselftest.rst
Documentation/devicetree/bindings/arm/rockchip.yaml
Documentation/devicetree/bindings/dsp/fsl,dsp.yaml
Documentation/devicetree/bindings/iio/adc/adi,ad7192.yaml
Documentation/devicetree/bindings/interrupt-controller/atmel,aic.txt
Documentation/devicetree/bindings/media/allwinner,sun4i-a10-csi.yaml
Documentation/devicetree/bindings/media/rc.yaml
Documentation/devicetree/bindings/phy/lantiq,vrx200-pcie-phy.yaml
Documentation/devicetree/bindings/pinctrl/aspeed,ast2600-pinctrl.yaml
Documentation/devicetree/bindings/regulator/fixed-regulator.yaml
Documentation/devicetree/bindings/riscv/cpus.yaml
Documentation/devicetree/bindings/serial/renesas,sci-serial.txt
Documentation/devicetree/bindings/usb/amlogic,dwc3.txt
Documentation/devicetree/bindings/usb/generic-ehci.yaml
Documentation/devicetree/bindings/usb/generic-ohci.yaml
Documentation/devicetree/bindings/usb/mediatek,mtk-xhci.txt
Documentation/devicetree/bindings/usb/mediatek,mtu3.txt
Documentation/devicetree/bindings/usb/usb-hcd.yaml
Documentation/devicetree/bindings/usb/usb-uhci.txt
Documentation/devicetree/bindings/usb/usb-xhci.txt
Documentation/hwmon/index.rst
Documentation/hwmon/inspur-ipsps1.rst
Documentation/hwmon/k10temp.rst
Documentation/kbuild/makefiles.rst
Documentation/kbuild/modules.rst
Documentation/kbuild/namespaces.rst [deleted file]
Documentation/kbuild/reproducible-builds.rst
Documentation/networking/device_drivers/index.rst
Documentation/networking/device_drivers/intel/e100.rst
Documentation/networking/device_drivers/intel/e1000.rst
Documentation/networking/device_drivers/intel/e1000e.rst
Documentation/networking/device_drivers/intel/fm10k.rst
Documentation/networking/device_drivers/intel/i40e.rst
Documentation/networking/device_drivers/intel/iavf.rst
Documentation/networking/device_drivers/intel/ice.rst
Documentation/networking/device_drivers/intel/igb.rst
Documentation/networking/device_drivers/intel/igbvf.rst
Documentation/networking/device_drivers/intel/ixgbe.rst
Documentation/networking/device_drivers/intel/ixgbevf.rst
Documentation/networking/device_drivers/pensando/ionic.rst
Documentation/networking/ip-sysctl.txt
Documentation/networking/j1939.rst
Documentation/networking/net_dim.txt
Documentation/process/coding-style.rst
Documentation/process/deprecated.rst
Documentation/usb/rio.rst [deleted file]
MAINTAINERS
Makefile
arch/arc/boot/dts/hsdk.dts
arch/arc/configs/hsdk_defconfig
arch/arc/kernel/perf_event.c
arch/arm/boot/dts/am335x-icev2.dts
arch/arm/boot/dts/am33xx-l4.dtsi
arch/arm/boot/dts/am3874-iceboard.dts
arch/arm/boot/dts/am4372.dtsi
arch/arm/boot/dts/bcm2835-rpi-zero-w.dts
arch/arm/boot/dts/bcm2837-rpi-cm3.dtsi
arch/arm/boot/dts/dra7-l4.dtsi
arch/arm/boot/dts/imx6-logicpd-som.dtsi
arch/arm/boot/dts/imx7s.dtsi
arch/arm/boot/dts/logicpd-torpedo-som.dtsi
arch/arm/boot/dts/mt7629-rfb.dts
arch/arm/boot/dts/mt7629.dtsi
arch/arm/boot/dts/omap3-gta04.dtsi
arch/arm/boot/dts/omap4-droid4-xt894.dts
arch/arm/boot/dts/omap4-panda-common.dtsi
arch/arm/boot/dts/omap4-sdp.dts
arch/arm/boot/dts/omap4-var-som-om44-wlan.dtsi
arch/arm/boot/dts/omap5-board-common.dtsi
arch/arm/boot/dts/omap54xx-clocks.dtsi
arch/arm/boot/dts/ste-dbx5x0.dtsi
arch/arm/boot/dts/stm32mp157-pinctrl.dtsi
arch/arm/boot/dts/sun4i-a10.dtsi
arch/arm/boot/dts/sun5i.dtsi
arch/arm/boot/dts/sun6i-a31.dtsi
arch/arm/boot/dts/sun7i-a20.dtsi
arch/arm/boot/dts/sun8i-a23-a33.dtsi
arch/arm/boot/dts/sun8i-a83t.dtsi
arch/arm/boot/dts/sun8i-r40.dtsi
arch/arm/boot/dts/sun9i-a80.dtsi
arch/arm/boot/dts/sunxi-h3-h5.dtsi
arch/arm/boot/dts/vf610-zii-scu4-aib.dts
arch/arm/configs/badge4_defconfig
arch/arm/configs/corgi_defconfig
arch/arm/configs/davinci_all_defconfig
arch/arm/configs/imx_v6_v7_defconfig
arch/arm/configs/multi_v7_defconfig
arch/arm/configs/omap2plus_defconfig
arch/arm/configs/pxa_defconfig
arch/arm/configs/s3c2410_defconfig
arch/arm/configs/spitz_defconfig
arch/arm/crypto/Kconfig
arch/arm/crypto/aes-ce-core.S
arch/arm/include/asm/domain.h
arch/arm/include/asm/uaccess.h
arch/arm/include/asm/xen/xen-ops.h [deleted file]
arch/arm/kernel/head-common.S
arch/arm/kernel/head-nommu.S
arch/arm/mach-davinci/dm365.c
arch/arm/mach-omap2/omap_hwmod_33xx_43xx_ipblock_data.c
arch/arm/mach-omap2/omap_hwmod_33xx_data.c
arch/arm/mach-omap2/pdata-quirks.c
arch/arm/mach-omap2/pm.c
arch/arm/mm/alignment.c
arch/arm/mm/proc-v7m.S
arch/arm/xen/Makefile
arch/arm/xen/efi.c [deleted file]
arch/arm/xen/enlighten.c
arch/arm/xen/mm.c
arch/arm64/Kconfig
arch/arm64/Makefile
arch/arm64/boot/dts/allwinner/sun50i-a64-pine64-plus.dts
arch/arm64/boot/dts/allwinner/sun50i-a64-pinebook.dts
arch/arm64/boot/dts/allwinner/sun50i-a64-sopine-baseboard.dts
arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi
arch/arm64/boot/dts/allwinner/sun50i-h6.dtsi
arch/arm64/boot/dts/broadcom/stingray/stingray-pinctrl.dtsi
arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi
arch/arm64/boot/dts/freescale/fsl-lx2160a.dtsi
arch/arm64/boot/dts/freescale/imx8mm.dtsi
arch/arm64/boot/dts/freescale/imx8mn.dtsi
arch/arm64/boot/dts/freescale/imx8mq-zii-ultra.dtsi
arch/arm64/boot/dts/freescale/imx8mq.dtsi
arch/arm64/boot/dts/marvell/armada-3720-turris-mox.dts
arch/arm64/boot/dts/rockchip/rk3399-gru-kevin.dts
arch/arm64/boot/dts/rockchip/rk3399-hugsun-x99.dts
arch/arm64/boot/dts/rockchip/rk3399-rockpro64.dts
arch/arm64/configs/defconfig
arch/arm64/include/asm/asm-uaccess.h
arch/arm64/include/asm/atomic_lse.h
arch/arm64/include/asm/cpucaps.h
arch/arm64/include/asm/cputype.h
arch/arm64/include/asm/kvm_hyp.h
arch/arm64/include/asm/memory.h
arch/arm64/include/asm/pgtable-prot.h
arch/arm64/include/asm/pgtable.h
arch/arm64/include/asm/sysreg.h
arch/arm64/include/asm/vdso/compat_barrier.h
arch/arm64/include/asm/vdso_datapage.h [deleted file]
arch/arm64/include/asm/xen/xen-ops.h [deleted file]
arch/arm64/kernel/armv8_deprecated.c
arch/arm64/kernel/cpu_errata.c
arch/arm64/kernel/cpufeature.c
arch/arm64/kernel/entry.S
arch/arm64/kernel/ftrace.c
arch/arm64/kernel/hibernate.c
arch/arm64/kernel/process.c
arch/arm64/kernel/vdso/gettimeofday.S [deleted file]
arch/arm64/kernel/vdso32/Makefile
arch/arm64/kvm/hyp/switch.c
arch/arm64/kvm/hyp/tlb.c
arch/arm64/kvm/sys_regs.c
arch/arm64/mm/fault.c
arch/arm64/xen/Makefile
arch/mips/bcm63xx/prom.c
arch/mips/boot/dts/qca/ar9331.dtsi
arch/mips/configs/mtx1_defconfig
arch/mips/configs/rm200_defconfig
arch/mips/fw/arc/memory.c
arch/mips/fw/sni/sniprom.c
arch/mips/include/asm/bmips.h
arch/mips/include/asm/cmpxchg.h
arch/mips/include/asm/octeon/cvmx-ipd.h
arch/mips/include/asm/unistd.h
arch/mips/include/asm/vdso/gettimeofday.h
arch/mips/include/uapi/asm/hwcap.h
arch/mips/kernel/cpu-bugs64.c
arch/mips/kernel/cpu-probe.c
arch/mips/kernel/setup.c
arch/mips/kernel/smp-bmips.c
arch/mips/kernel/syscall.c
arch/mips/kernel/syscalls/syscall_n32.tbl
arch/mips/kernel/syscalls/syscall_n64.tbl
arch/mips/kernel/syscalls/syscall_o32.tbl
arch/mips/loongson64/Platform
arch/mips/loongson64/common/mem.c
arch/mips/loongson64/common/serial.c
arch/mips/loongson64/loongson-3/numa.c
arch/mips/mm/tlbex.c
arch/mips/pmcs-msp71xx/msp_prom.c
arch/mips/vdso/Makefile
arch/mips/vdso/gettimeofday.c [deleted file]
arch/parisc/include/asm/cache.h
arch/parisc/include/asm/ldcw.h
arch/parisc/kernel/entry.S
arch/parisc/mm/ioremap.c
arch/powerpc/boot/Makefile
arch/powerpc/include/asm/book3s/32/kup.h
arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
arch/powerpc/include/asm/elf.h
arch/powerpc/kernel/prom_init.c
arch/powerpc/kernel/prom_init_check.sh
arch/powerpc/kvm/book3s.c
arch/powerpc/kvm/book3s_hv_rmhandlers.S
arch/powerpc/kvm/book3s_xive.c
arch/powerpc/kvm/book3s_xive.h
arch/powerpc/kvm/book3s_xive_native.c
arch/powerpc/platforms/cell/spufs/inode.c
arch/powerpc/platforms/powernv/eeh-powernv.c
arch/powerpc/platforms/powernv/smp.c
arch/powerpc/platforms/pseries/lpar.c
arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts
arch/riscv/include/asm/asm.h
arch/riscv/include/asm/bug.h
arch/riscv/include/asm/io.h
arch/riscv/include/asm/irq.h
arch/riscv/include/asm/pgtable.h
arch/riscv/include/asm/switch_to.h
arch/riscv/include/asm/tlbflush.h
arch/riscv/kernel/cpufeature.c
arch/riscv/kernel/entry.S
arch/riscv/kernel/head.h [new file with mode: 0644]
arch/riscv/kernel/irq.c
arch/riscv/kernel/module-sections.c
arch/riscv/kernel/process.c
arch/riscv/kernel/ptrace.c
arch/riscv/kernel/reset.c
arch/riscv/kernel/setup.c
arch/riscv/kernel/signal.c
arch/riscv/kernel/smp.c
arch/riscv/kernel/smpboot.c
arch/riscv/kernel/syscall_table.c
arch/riscv/kernel/time.c
arch/riscv/kernel/traps.c
arch/riscv/kernel/vdso.c
arch/riscv/mm/context.c
arch/riscv/mm/fault.c
arch/riscv/mm/init.c
arch/riscv/mm/sifive_l2_cache.c
arch/s390/boot/startup.c
arch/s390/configs/debug_defconfig
arch/s390/configs/defconfig
arch/s390/configs/zfcpdump_defconfig
arch/s390/include/asm/atomic_ops.h
arch/s390/include/asm/bitops.h
arch/s390/include/asm/cpacf.h
arch/s390/include/asm/cpu_mf.h
arch/s390/include/asm/hugetlb.h
arch/s390/include/asm/jump_label.h
arch/s390/include/asm/pgtable.h
arch/s390/include/asm/qdio.h
arch/s390/include/asm/uaccess.h
arch/s390/include/asm/unwind.h
arch/s390/kernel/idle.c
arch/s390/kernel/machine_kexec_reloc.c
arch/s390/kernel/perf_cpum_cf_diag.c
arch/s390/kernel/perf_cpum_sf.c
arch/s390/kernel/unwind_bc.c
arch/s390/kvm/kvm-s390.c
arch/s390/mm/cmm.c
arch/s390/pci/pci_clp.c
arch/sparc/Kconfig
arch/um/drivers/ubd_kern.c
arch/x86/boot/compressed/acpi.c
arch/x86/boot/compressed/eboot.c
arch/x86/boot/compressed/misc.c
arch/x86/events/amd/core.c
arch/x86/events/amd/ibs.c
arch/x86/events/intel/core.c
arch/x86/events/intel/cstate.c
arch/x86/events/intel/pt.c
arch/x86/events/intel/uncore.c
arch/x86/events/intel/uncore.h
arch/x86/events/msr.c
arch/x86/hyperv/hv_apic.c
arch/x86/include/asm/cpu_entry_area.h
arch/x86/include/asm/intel-family.h
arch/x86/include/asm/kvm_host.h
arch/x86/include/asm/mwait.h
arch/x86/include/asm/pti.h
arch/x86/include/asm/uaccess.h
arch/x86/include/asm/vmware.h
arch/x86/kernel/apic/x2apic_cluster.c
arch/x86/kernel/cpu/mshyperv.c
arch/x86/kernel/cpu/vmware.c
arch/x86/kernel/head64.c
arch/x86/kernel/process.h
arch/x86/kvm/cpuid.c
arch/x86/kvm/lapic.c
arch/x86/kvm/lapic.h
arch/x86/kvm/mmu.c
arch/x86/kvm/svm.c
arch/x86/kvm/vmx/nested.c
arch/x86/kvm/vmx/nested.h
arch/x86/kvm/vmx/pmu_intel.c
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/x86.c
arch/x86/lib/delay.c
arch/x86/platform/efi/efi.c
arch/x86/xen/efi.c
arch/x86/xen/enlighten.c
arch/x86/xen/enlighten_pv.c
arch/xtensa/boot/dts/virt.dts
arch/xtensa/include/asm/bitops.h
arch/xtensa/include/asm/uaccess.h
arch/xtensa/kernel/xtensa_ksyms.c
block/blk-cgroup.c
block/blk-iocost.c
block/blk-mq.c
block/blk-rq-qos.c
block/blk-rq-qos.h
block/blk-wbt.c
block/elevator.c
block/sed-opal.c
drivers/acpi/cppc_acpi.c
drivers/acpi/hmat/hmat.c
drivers/acpi/nfit/core.c
drivers/acpi/processor_driver.c
drivers/acpi/processor_perflib.c
drivers/acpi/processor_thermal.c
drivers/acpi/sleep.c
drivers/amba/bus.c
drivers/android/binder.c
drivers/android/binder_alloc.c
drivers/android/binder_internal.h
drivers/ata/ahci.c
drivers/ata/libahci_platform.c
drivers/ata/libata-scsi.c
drivers/base/core.c
drivers/base/memory.c
drivers/base/platform.c
drivers/base/power/qos.c
drivers/block/loop.c
drivers/block/nbd.c
drivers/block/null_blk_zoned.c
drivers/block/rbd.c
drivers/block/zram/zram_drv.c
drivers/bus/ti-sysc.c
drivers/char/random.c
drivers/clk/ti/clk-7xx.c
drivers/clocksource/timer-of.c
drivers/cpufreq/cpufreq.c
drivers/cpufreq/intel_pstate.c
drivers/cpufreq/ppc_cbe_cpufreq_pmi.c
drivers/cpuidle/cpuidle-haltpoll.c
drivers/crypto/chelsio/chtls/chtls_cm.c
drivers/crypto/chelsio/chtls/chtls_io.c
drivers/dma-buf/dma-resv.c
drivers/dma/imx-sdma.c
drivers/dma/qcom/bam_dma.c
drivers/dma/sprd-dma.c
drivers/dma/tegra210-adma.c
drivers/dma/ti/cppi41.c
drivers/dma/xilinx/xilinx_dma.c
drivers/edac/ghes_edac.c
drivers/firmware/dmi_scan.c
drivers/firmware/efi/Kconfig
drivers/firmware/efi/cper.c
drivers/firmware/efi/efi.c
drivers/firmware/efi/libstub/Makefile
drivers/firmware/efi/libstub/arm32-stub.c
drivers/firmware/efi/libstub/efi-stub-helper.c
drivers/firmware/efi/rci2-table.c
drivers/firmware/efi/test/efi_test.c
drivers/firmware/efi/tpm.c
drivers/firmware/google/vpd_decode.c
drivers/gpio/gpio-eic-sprd.c
drivers/gpio/gpio-intel-mid.c
drivers/gpio/gpio-lynxpoint.c
drivers/gpio/gpio-max77620.c
drivers/gpio/gpio-merrifield.c
drivers/gpio/gpiolib-of.c
drivers/gpio/gpiolib.c
drivers/gpu/drm/amd/amdgpu/Makefile
drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
drivers/gpu/drm/amd/amdgpu/nv.c
drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
drivers/gpu/drm/amd/amdgpu/soc15.c
drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/dc/calcs/Makefile
drivers/gpu/drm/amd/display/dc/core/dc.c
drivers/gpu/drm/amd/display/dc/core/dc_link.c
drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
drivers/gpu/drm/amd/display/dc/core/dc_resource.c
drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
drivers/gpu/drm/amd/display/dc/dcn20/Makefile
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
drivers/gpu/drm/amd/display/dc/dcn21/Makefile
drivers/gpu/drm/amd/display/dc/dml/Makefile
drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c
drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
drivers/gpu/drm/amd/display/dc/dsc/Makefile
drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
drivers/gpu/drm/amd/powerplay/navi10_ppt.c
drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
drivers/gpu/drm/amd/powerplay/vega20_ppt.c
drivers/gpu/drm/arm/display/komeda/komeda_kms.c
drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c
drivers/gpu/drm/arm/malidp_mw.c
drivers/gpu/drm/bridge/tc358767.c
drivers/gpu/drm/drm_atomic.c
drivers/gpu/drm/drm_edid.c
drivers/gpu/drm/drm_writeback.c
drivers/gpu/drm/etnaviv/etnaviv_dump.c
drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
drivers/gpu/drm/etnaviv/etnaviv_mmu.c
drivers/gpu/drm/i915/display/intel_bios.c
drivers/gpu/drm/i915/display/intel_display.c
drivers/gpu/drm/i915/display/intel_display.h
drivers/gpu/drm/i915/display/intel_dp.c
drivers/gpu/drm/i915/display/intel_dp.h
drivers/gpu/drm/i915/display/intel_dp_mst.c
drivers/gpu/drm/i915/display/intel_dpll_mgr.c
drivers/gpu/drm/i915/display/intel_dpll_mgr.h
drivers/gpu/drm/i915/display/intel_sprite.c
drivers/gpu/drm/i915/gem/i915_gem_mman.c
drivers/gpu/drm/i915/gem/i915_gem_object.h
drivers/gpu/drm/i915/gem/i915_gem_object_types.h
drivers/gpu/drm/i915/gem/i915_gem_pm.c
drivers/gpu/drm/i915/gem/i915_gem_userptr.c
drivers/gpu/drm/i915/gt/intel_engine.h
drivers/gpu/drm/i915/gt/intel_engine_cs.c
drivers/gpu/drm/i915/gt/intel_lrc.c
drivers/gpu/drm/i915/gt/intel_reset.c
drivers/gpu/drm/i915/gt/intel_reset.h
drivers/gpu/drm/i915/gt/intel_ringbuffer.c
drivers/gpu/drm/i915/gt/intel_workarounds.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem.h
drivers/gpu/drm/i915/i915_request.c
drivers/gpu/drm/i915/i915_request.h
drivers/gpu/drm/i915/intel_pch.c
drivers/gpu/drm/i915/intel_pch.h
drivers/gpu/drm/i915/selftests/i915_gem.c
drivers/gpu/drm/msm/dsi/dsi_host.c
drivers/gpu/drm/omapdrm/dss/dss.c
drivers/gpu/drm/panel/panel-lg-lb035q02.c
drivers/gpu/drm/panel/panel-nec-nl8048hl11.c
drivers/gpu/drm/panel/panel-sony-acx565akm.c
drivers/gpu/drm/panel/panel-tpo-td028ttec1.c
drivers/gpu/drm/panel/panel-tpo-td043mtea1.c
drivers/gpu/drm/panfrost/panfrost_drv.c
drivers/gpu/drm/panfrost/panfrost_gpu.c
drivers/gpu/drm/panfrost/panfrost_job.c
drivers/gpu/drm/panfrost/panfrost_mmu.c
drivers/gpu/drm/panfrost/panfrost_perfcnt.c
drivers/gpu/drm/radeon/radeon_drv.c
drivers/gpu/drm/rcar-du/rcar_du_writeback.c
drivers/gpu/drm/scheduler/sched_main.c
drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
drivers/gpu/drm/tiny/Kconfig
drivers/gpu/drm/ttm/ttm_bo.c
drivers/gpu/drm/ttm/ttm_bo_vm.c
drivers/gpu/drm/v3d/v3d_gem.c
drivers/gpu/drm/vc4/vc4_txp.c
drivers/gpu/drm/xen/xen_drm_front.c
drivers/hid/hid-axff.c
drivers/hid/hid-core.c
drivers/hid/hid-dr.c
drivers/hid/hid-emsff.c
drivers/hid/hid-gaff.c
drivers/hid/hid-google-hammer.c
drivers/hid/hid-holtekff.c
drivers/hid/hid-hyperv.c
drivers/hid/hid-ids.h
drivers/hid/hid-lg2ff.c
drivers/hid/hid-lg3ff.c
drivers/hid/hid-lg4ff.c
drivers/hid/hid-lgff.c
drivers/hid/hid-logitech-hidpp.c
drivers/hid/hid-microsoft.c
drivers/hid/hid-prodikeys.c
drivers/hid/hid-sony.c
drivers/hid/hid-tmff.c
drivers/hid/hid-zpff.c
drivers/hid/i2c-hid/i2c-hid-core.c
drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c
drivers/hid/intel-ish-hid/ishtp/client-buffers.c
drivers/hid/wacom.h
drivers/hid/wacom_wac.c
drivers/hv/vmbus_drv.c
drivers/hwmon/ina3221.c
drivers/hwmon/nct7904.c
drivers/i2c/busses/i2c-aspeed.c
drivers/i2c/busses/i2c-mt65xx.c
drivers/i2c/busses/i2c-stm32f7.c
drivers/iio/accel/adxl372.c
drivers/iio/accel/bmc150-accel-core.c
drivers/iio/adc/ad799x.c
drivers/iio/adc/axp288_adc.c
drivers/iio/adc/hx711.c
drivers/iio/adc/meson_saradc.c
drivers/iio/adc/stm32-adc-core.c
drivers/iio/adc/stm32-adc-core.h
drivers/iio/adc/stm32-adc.c
drivers/iio/imu/adis_buffer.c
drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_shub.c
drivers/iio/light/Kconfig
drivers/iio/light/opt3001.c
drivers/iio/light/vcnl4000.c
drivers/infiniband/core/cm.c
drivers/infiniband/core/cma.c
drivers/infiniband/core/core_priv.h
drivers/infiniband/core/device.c
drivers/infiniband/core/iwcm.c
drivers/infiniband/core/netlink.c
drivers/infiniband/core/nldev.c
drivers/infiniband/core/security.c
drivers/infiniband/core/umem_odp.c
drivers/infiniband/core/uverbs.h
drivers/infiniband/core/verbs.c
drivers/infiniband/hw/cxgb4/cm.c
drivers/infiniband/hw/cxgb4/device.c
drivers/infiniband/hw/cxgb4/mem.c
drivers/infiniband/hw/cxgb4/qp.c
drivers/infiniband/hw/hfi1/sdma.c
drivers/infiniband/hw/hfi1/tid_rdma.c
drivers/infiniband/hw/hfi1/verbs.c
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
drivers/infiniband/hw/i40iw/i40iw_verbs.c
drivers/infiniband/hw/mlx5/devx.c
drivers/infiniband/hw/mlx5/mlx5_ib.h
drivers/infiniband/hw/mlx5/mr.c
drivers/infiniband/hw/mlx5/odp.c
drivers/infiniband/hw/mlx5/qp.c
drivers/infiniband/hw/qedr/main.c
drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c
drivers/infiniband/sw/siw/siw_qp.c
drivers/infiniband/sw/siw/siw_verbs.c
drivers/input/misc/da9063_onkey.c
drivers/input/misc/soc_button_array.c
drivers/input/mouse/elantech.c
drivers/input/rmi4/rmi_driver.c
drivers/input/touchscreen/goodix.c
drivers/input/touchscreen/st1232.c
drivers/iommu/amd_iommu.c
drivers/iommu/amd_iommu_quirks.c
drivers/iommu/amd_iommu_types.h
drivers/iommu/arm-smmu.c
drivers/iommu/intel-iommu.c
drivers/iommu/io-pgtable-arm.c
drivers/iommu/ipmmu-vmsa.c
drivers/iommu/rockchip-iommu.c
drivers/irqchip/irq-al-fic.c
drivers/irqchip/irq-atmel-aic5.c
drivers/irqchip/irq-gic-v3-its.c
drivers/irqchip/irq-gic-v3.c
drivers/irqchip/irq-sifive-plic.c
drivers/isdn/capi/capi.c
drivers/macintosh/windfarm_cpufreq_clamp.c
drivers/md/dm-cache-target.c
drivers/md/dm-clone-target.c
drivers/md/dm-snap.c
drivers/md/raid0.c
drivers/media/usb/stkwebcam/stk-webcam.c
drivers/memstick/host/jmb38x_ms.c
drivers/mfd/mt6397-core.c
drivers/misc/fastrpc.c
drivers/misc/mei/bus-fixup.c
drivers/misc/mei/hw-me-regs.h
drivers/misc/mei/hw-me.c
drivers/misc/mei/hw-me.h
drivers/misc/mei/mei_dev.h
drivers/misc/mei/pci-me.c
drivers/mmc/host/cqhci.c
drivers/mmc/host/mxs-mmc.c
drivers/mmc/host/renesas_sdhi_core.c
drivers/mmc/host/sdhci-iproc.c
drivers/mmc/host/sdhci-omap.c
drivers/mmc/host/sh_mmcif.c
drivers/mtd/nand/raw/au1550nd.c
drivers/mtd/spi-nor/spi-nor.c
drivers/net/bonding/bond_alb.c
drivers/net/bonding/bond_main.c
drivers/net/dsa/b53/b53_common.c
drivers/net/dsa/bcm_sf2.c
drivers/net/dsa/microchip/ksz8795.c
drivers/net/dsa/microchip/ksz8795_spi.c
drivers/net/dsa/microchip/ksz9477_i2c.c
drivers/net/dsa/microchip/ksz9477_reg.h
drivers/net/dsa/microchip/ksz9477_spi.c
drivers/net/dsa/microchip/ksz_common.c
drivers/net/dsa/microchip/ksz_common.h
drivers/net/dsa/qca8k.c
drivers/net/dsa/rtl8366.c
drivers/net/dsa/rtl8366rb.c
drivers/net/dsa/sja1105/Kconfig
drivers/net/dsa/sja1105/sja1105.h
drivers/net/dsa/sja1105/sja1105_dynamic_config.h
drivers/net/dsa/sja1105/sja1105_main.c
drivers/net/dsa/sja1105/sja1105_ptp.h
drivers/net/dsa/sja1105/sja1105_spi.c
drivers/net/dsa/sja1105/sja1105_static_config.h
drivers/net/dsa/sja1105/sja1105_tas.h
drivers/net/ethernet/aquantia/atlantic/aq_main.c
drivers/net/ethernet/aquantia/atlantic/aq_nic.c
drivers/net/ethernet/aquantia/atlantic/aq_ring.c
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
drivers/net/ethernet/arc/emac_rockchip.c
drivers/net/ethernet/atheros/ag71xx.c
drivers/net/ethernet/broadcom/Kconfig
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
drivers/net/ethernet/broadcom/genet/bcmgenet.c
drivers/net/ethernet/broadcom/genet/bcmgenet.h
drivers/net/ethernet/broadcom/genet/bcmmii.c
drivers/net/ethernet/cadence/macb_main.c
drivers/net/ethernet/cavium/common/cavium_ptp.h
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
drivers/net/ethernet/chelsio/cxgb4/sge.c
drivers/net/ethernet/cortina/gemini.h
drivers/net/ethernet/faraday/ftgmac100.c
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.h
drivers/net/ethernet/freescale/dpaa2/dpni.h
drivers/net/ethernet/freescale/dpaa2/dprtc-cmd.h
drivers/net/ethernet/freescale/dpaa2/dprtc.h
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/freescale/fec_ptp.c
drivers/net/ethernet/google/gve/gve_rx.c
drivers/net/ethernet/google/gve/gve_tx.c
drivers/net/ethernet/hisilicon/hip04_eth.c
drivers/net/ethernet/hisilicon/hns3/hnae3.h
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
drivers/net/ethernet/hisilicon/hns_mdio.c
drivers/net/ethernet/i825xx/lasi_82596.c
drivers/net/ethernet/i825xx/lib82596.c
drivers/net/ethernet/i825xx/sni_82596.c
drivers/net/ethernet/ibm/ibmvnic.c
drivers/net/ethernet/intel/e1000/e1000_ethtool.c
drivers/net/ethernet/intel/i40e/i40e_xsk.c
drivers/net/ethernet/intel/igb/e1000_82575.c
drivers/net/ethernet/intel/igb/igb_main.c
drivers/net/ethernet/intel/igc/igc_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/marvell/mvneta_bm.h
drivers/net/ethernet/mediatek/mtk_eth_soc.c
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
drivers/net/ethernet/mellanox/mlx5/core/en.h
drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c
drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
drivers/net/ethernet/mellanox/mlx5/core/health.c
drivers/net/ethernet/mellanox/mlx5/core/mr.c
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
drivers/net/ethernet/mellanox/mlxsw/core.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
drivers/net/ethernet/mscc/ocelot.c
drivers/net/ethernet/mscc/ocelot_board.c
drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
drivers/net/ethernet/nxp/lpc_eth.c
drivers/net/ethernet/pensando/Kconfig
drivers/net/ethernet/pensando/ionic/ionic_lif.c
drivers/net/ethernet/pensando/ionic/ionic_lif.h
drivers/net/ethernet/pensando/ionic/ionic_main.c
drivers/net/ethernet/pensando/ionic/ionic_stats.c
drivers/net/ethernet/qlogic/qed/qed_main.c
drivers/net/ethernet/qlogic/qed/qed_sriov.c
drivers/net/ethernet/qlogic/qla3xxx.c
drivers/net/ethernet/realtek/r8169_main.c
drivers/net/ethernet/socionext/netsec.c
drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
drivers/net/ethernet/stmicro/stmmac/dwmac5.c
drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
drivers/net/ethernet/ti/davinci_cpdma.c
drivers/net/fjes/fjes_main.c
drivers/net/hamradio/bpqether.c
drivers/net/hyperv/netvsc_drv.c
drivers/net/ieee802154/atusb.c
drivers/net/ieee802154/ca8210.c
drivers/net/ieee802154/mcr20a.c
drivers/net/ipvlan/ipvlan_main.c
drivers/net/macsec.c
drivers/net/macvlan.c
drivers/net/netdevsim/dev.c
drivers/net/netdevsim/fib.c
drivers/net/phy/at803x.c
drivers/net/phy/bcm7xxx.c
drivers/net/phy/mdio_device.c
drivers/net/phy/micrel.c
drivers/net/phy/phy-c45.c
drivers/net/phy/phy-core.c
drivers/net/phy/phy.c
drivers/net/phy/phy_device.c
drivers/net/phy/phylink.c
drivers/net/phy/smsc.c
drivers/net/ppp/ppp_generic.c
drivers/net/ppp/pptp.c
drivers/net/team/team.c
drivers/net/tun.c
drivers/net/usb/cdc_ether.c
drivers/net/usb/hso.c
drivers/net/usb/lan78xx.c
drivers/net/usb/qmi_wwan.c
drivers/net/usb/r8152.c
drivers/net/usb/sr9800.c
drivers/net/virtio_net.c
drivers/net/vrf.c
drivers/net/vxlan.c
drivers/net/wimax/i2400m/op-rfkill.c
drivers/net/wireless/ath/ath10k/core.c
drivers/net/wireless/intel/iwlwifi/fw/acpi.c
drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
drivers/net/wireless/intel/iwlwifi/fw/dbg.c
drivers/net/wireless/intel/iwlwifi/fw/file.h
drivers/net/wireless/intel/iwlwifi/iwl-csr.h
drivers/net/wireless/intel/iwlwifi/iwl-io.h
drivers/net/wireless/intel/iwlwifi/iwl-prph.h
drivers/net/wireless/intel/iwlwifi/mvm/fw.c
drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
drivers/net/wireless/intel/iwlwifi/mvm/scan.c
drivers/net/wireless/intel/iwlwifi/mvm/sta.c
drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
drivers/net/wireless/intel/iwlwifi/pcie/drv.c
drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
drivers/net/wireless/intel/iwlwifi/pcie/trans.c
drivers/net/wireless/intersil/hostap/hostap_hw.c
drivers/net/wireless/mac80211_hwsim.c
drivers/net/wireless/mediatek/mt76/Makefile
drivers/net/wireless/mediatek/mt76/dma.c
drivers/net/wireless/mediatek/mt76/mt76.h
drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
drivers/net/wireless/mediatek/mt76/pci.c [new file with mode: 0644]
drivers/net/wireless/ralink/rt2x00/rt2x00.h
drivers/net/wireless/ralink/rt2x00/rt2x00debug.c
drivers/net/wireless/realtek/rtlwifi/pci.c
drivers/net/wireless/realtek/rtlwifi/ps.c
drivers/net/wireless/virt_wifi.c
drivers/net/xen-netback/interface.c
drivers/net/xen-netfront.c
drivers/nfc/pn533/usb.c
drivers/nvme/host/core.c
drivers/nvme/host/fabrics.h
drivers/nvme/host/multipath.c
drivers/nvme/host/nvme.h
drivers/nvme/host/pci.c
drivers/nvme/host/rdma.c
drivers/nvme/host/tcp.c
drivers/nvme/target/io-cmd-bdev.c
drivers/nvme/target/loop.c
drivers/nvme/target/tcp.c
drivers/of/of_reserved_mem.c
drivers/of/unittest.c
drivers/opp/core.c
drivers/opp/of.c
drivers/parisc/sba_iommu.c
drivers/pci/pci.c
drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
drivers/pinctrl/aspeed/pinmux-aspeed.h
drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
drivers/pinctrl/bcm/pinctrl-ns2-mux.c
drivers/pinctrl/berlin/pinctrl-as370.c
drivers/pinctrl/intel/pinctrl-cherryview.c
drivers/pinctrl/intel/pinctrl-intel.c
drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
drivers/pinctrl/pinctrl-stmfx.c
drivers/platform/x86/classmate-laptop.c
drivers/platform/x86/i2c-multi-instantiate.c
drivers/platform/x86/intel_punit_ipc.c
drivers/ptp/Kconfig
drivers/ptp/ptp_qoriq.c
drivers/pwm/core.c
drivers/regulator/core.c
drivers/regulator/da9062-regulator.c
drivers/regulator/fixed.c
drivers/regulator/lochnagar-regulator.c
drivers/regulator/of_regulator.c
drivers/regulator/pfuze100-regulator.c
drivers/regulator/qcom-rpmh-regulator.c
drivers/regulator/ti-abb-regulator.c
drivers/s390/block/dasd_eckd.c
drivers/s390/cio/cio.h
drivers/s390/cio/css.c
drivers/s390/cio/device.c
drivers/s390/cio/qdio_setup.c
drivers/s390/crypto/zcrypt_api.c
drivers/s390/net/qeth_core_main.c
drivers/s390/net/qeth_l2_main.c
drivers/s390/scsi/zfcp_fsf.c
drivers/scsi/Kconfig
drivers/scsi/bnx2fc/bnx2fc_io.c
drivers/scsi/ch.c
drivers/scsi/device_handler/scsi_dh_alua.c
drivers/scsi/hisi_sas/hisi_sas_main.c
drivers/scsi/hpsa.c
drivers/scsi/lpfc/lpfc_init.c
drivers/scsi/lpfc/lpfc_nportdisc.c
drivers/scsi/lpfc/lpfc_scsi.c
drivers/scsi/lpfc/lpfc_sli.c
drivers/scsi/megaraid.c
drivers/scsi/qedf/qedf_main.c
drivers/scsi/qla2xxx/qla_attr.c
drivers/scsi/qla2xxx/qla_bsg.c
drivers/scsi/qla2xxx/qla_def.h
drivers/scsi/qla2xxx/qla_gs.c
drivers/scsi/qla2xxx/qla_init.c
drivers/scsi/qla2xxx/qla_iocb.c
drivers/scsi/qla2xxx/qla_isr.c
drivers/scsi/qla2xxx/qla_mbx.c
drivers/scsi/qla2xxx/qla_mid.c
drivers/scsi/qla2xxx/qla_os.c
drivers/scsi/qla2xxx/qla_target.c
drivers/scsi/scsi_error.c
drivers/scsi/scsi_lib.c
drivers/scsi/scsi_sysfs.c
drivers/scsi/sd.c
drivers/scsi/sni_53c710.c
drivers/scsi/storvsc_drv.c
drivers/scsi/ufs/ufs_bsg.c
drivers/scsi/ufs/ufshcd.c
drivers/soc/imx/soc-imx-scu.c
drivers/staging/exfat/Kconfig
drivers/staging/exfat/Makefile
drivers/staging/exfat/exfat.h
drivers/staging/exfat/exfat_blkdev.c
drivers/staging/exfat/exfat_cache.c
drivers/staging/exfat/exfat_core.c
drivers/staging/exfat/exfat_nls.c
drivers/staging/exfat/exfat_super.c
drivers/staging/exfat/exfat_upcase.c
drivers/staging/fbtft/Kconfig
drivers/staging/fbtft/Makefile
drivers/staging/fbtft/fbtft-core.c
drivers/staging/fbtft/fbtft_device.c [deleted file]
drivers/staging/fbtft/flexfb.c [deleted file]
drivers/staging/octeon/ethernet-tx.c
drivers/staging/octeon/octeon-stubs.h
drivers/staging/rtl8188eu/hal/hal8188e_rate_adaptive.c
drivers/staging/rtl8188eu/os_dep/usb_intf.c
drivers/staging/speakup/sysfs-driver-speakup [new file with mode: 0644]
drivers/staging/vc04_services/bcm2835-audio/bcm2835-pcm.c
drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c
drivers/staging/vt6655/device_main.c
drivers/staging/wlan-ng/cfg80211.c
drivers/target/iscsi/cxgbit/cxgbit_cm.c
drivers/target/target_core_device.c
drivers/thermal/cpu_cooling.c
drivers/tty/n_hdlc.c
drivers/tty/serial/8250/8250_men_mcb.c
drivers/tty/serial/8250/8250_omap.c
drivers/tty/serial/Kconfig
drivers/tty/serial/fsl_linflexuart.c
drivers/tty/serial/fsl_lpuart.c
drivers/tty/serial/imx.c
drivers/tty/serial/owl-uart.c
drivers/tty/serial/rda-uart.c
drivers/tty/serial/serial_core.c
drivers/tty/serial/serial_mctrl_gpio.c
drivers/tty/serial/sh-sci.c
drivers/tty/serial/uartlite.c
drivers/tty/serial/xilinx_uartps.c
drivers/usb/cdns3/cdns3-pci-wrap.c
drivers/usb/cdns3/core.c
drivers/usb/cdns3/ep0.c
drivers/usb/cdns3/gadget.c
drivers/usb/cdns3/host-export.h
drivers/usb/cdns3/host.c
drivers/usb/class/usblp.c
drivers/usb/core/config.c
drivers/usb/dwc3/Kconfig
drivers/usb/dwc3/core.c
drivers/usb/dwc3/drd.c
drivers/usb/dwc3/dwc3-pci.c
drivers/usb/dwc3/gadget.c
drivers/usb/dwc3/host.c
drivers/usb/gadget/composite.c
drivers/usb/gadget/configfs.c
drivers/usb/gadget/udc/Kconfig
drivers/usb/gadget/udc/atmel_usba_udc.c
drivers/usb/gadget/udc/core.c
drivers/usb/gadget/udc/dummy_hcd.c
drivers/usb/gadget/udc/fsl_udc_core.c
drivers/usb/gadget/udc/lpc32xx_udc.c
drivers/usb/gadget/udc/renesas_usb3.c
drivers/usb/host/xhci-debugfs.c
drivers/usb/host/xhci-ext-caps.c
drivers/usb/host/xhci-ring.c
drivers/usb/host/xhci.c
drivers/usb/image/microtek.c
drivers/usb/misc/Kconfig
drivers/usb/misc/Makefile
drivers/usb/misc/adutux.c
drivers/usb/misc/chaoskey.c
drivers/usb/misc/iowarrior.c
drivers/usb/misc/ldusb.c
drivers/usb/misc/legousbtower.c
drivers/usb/misc/rio500.c [deleted file]
drivers/usb/misc/rio500_usb.h [deleted file]
drivers/usb/misc/usblcd.c
drivers/usb/misc/yurex.c
drivers/usb/mtu3/mtu3_core.c
drivers/usb/renesas_usbhs/common.c
drivers/usb/renesas_usbhs/common.h
drivers/usb/renesas_usbhs/fifo.c
drivers/usb/renesas_usbhs/fifo.h
drivers/usb/renesas_usbhs/mod_gadget.c
drivers/usb/renesas_usbhs/pipe.c
drivers/usb/renesas_usbhs/pipe.h
drivers/usb/serial/ftdi_sio.c
drivers/usb/serial/ftdi_sio_ids.h
drivers/usb/serial/keyspan.c
drivers/usb/serial/option.c
drivers/usb/serial/ti_usb_3410_5052.c
drivers/usb/serial/usb-serial.c
drivers/usb/serial/whiteheat.c
drivers/usb/serial/whiteheat.h
drivers/usb/storage/scsiglue.c
drivers/usb/storage/uas.c
drivers/usb/typec/tcpm/tcpm.c
drivers/usb/typec/ucsi/displayport.c
drivers/usb/typec/ucsi/ucsi_ccg.c
drivers/usb/usb-skeleton.c
drivers/usb/usbip/vhci_hcd.c
drivers/usb/usbip/vhci_tx.c
drivers/vfio/vfio_iommu_type1.c
drivers/vhost/test.c
drivers/vhost/vringh.c
drivers/video/logo/Makefile
drivers/virt/vboxguest/vboxguest_utils.c
drivers/virtio/virtio_ring.c
drivers/w1/slaves/Kconfig
drivers/xen/balloon.c
drivers/xen/efi.c
drivers/xen/gntdev.c
drivers/xen/grant-table.c
drivers/xen/pvcalls-back.c
drivers/xen/xenbus/xenbus_dev_frontend.c
fs/binfmt_elf.c
fs/btrfs/block-group.c
fs/btrfs/ctree.h
fs/btrfs/delalloc-space.c
fs/btrfs/disk-io.c
fs/btrfs/file.c
fs/btrfs/inode-map.c
fs/btrfs/inode.c
fs/btrfs/ioctl.c
fs/btrfs/qgroup.c
fs/btrfs/ref-verify.c
fs/btrfs/relocation.c
fs/btrfs/send.c
fs/btrfs/tree-log.c
fs/btrfs/volumes.c
fs/ceph/mds_client.c
fs/cifs/cifsfs.c
fs/cifs/cifsglob.h
fs/cifs/cifsproto.h
fs/cifs/connect.c
fs/cifs/dir.c
fs/cifs/file.c
fs/cifs/inode.c
fs/cifs/netmisc.c
fs/cifs/smb1ops.c
fs/cifs/smb2file.c
fs/cifs/smb2ops.c
fs/cifs/smb2pdu.c
fs/cifs/smb2proto.h
fs/cifs/transport.c
fs/dax.c
fs/direct-io.c
fs/erofs/data.c
fs/erofs/super.c
fs/erofs/zdata.c
fs/fs-writeback.c
fs/fuse/Makefile
fs/fuse/dev.c
fs/fuse/dir.c
fs/fuse/file.c
fs/fuse/fuse_i.h
fs/fuse/inode.c
fs/fuse/virtio_fs.c
fs/gfs2/ops_fstype.c
fs/io_uring.c
fs/libfs.c
fs/nfs/delegation.c
fs/nfs/delegation.h
fs/nfs/direct.c
fs/nfs/nfs4proc.c
fs/nfs/write.c
fs/ocfs2/aops.c
fs/ocfs2/file.c
fs/ocfs2/ioctl.c
fs/ocfs2/journal.c
fs/ocfs2/localalloc.c
fs/ocfs2/xattr.c
fs/proc/meminfo.c
fs/proc/page.c
fs/readdir.c
fs/statfs.c
fs/super.c
fs/tracefs/inode.c
fs/xfs/libxfs/xfs_ag.c
fs/xfs/libxfs/xfs_attr_leaf.c
fs/xfs/libxfs/xfs_bmap.c
fs/xfs/libxfs/xfs_bmap.h
fs/xfs/libxfs/xfs_dir2_block.c
fs/xfs/libxfs/xfs_fs.h
fs/xfs/scrub/refcount.c
fs/xfs/xfs_bmap_util.c
fs/xfs/xfs_buf.c
fs/xfs/xfs_log.c
fs/xfs/xfs_log_recover.c
include/acpi/processor.h
include/linux/bitmap.h
include/linux/bitops.h
include/linux/compiler_attributes.h
include/linux/cpufreq.h
include/linux/dsa/sja1105.h
include/linux/dynamic_debug.h
include/linux/efi.h
include/linux/export.h
include/linux/filter.h
include/linux/gfp.h
include/linux/gpio/driver.h
include/linux/hwmon.h
include/linux/if_macvlan.h
include/linux/if_team.h
include/linux/if_vlan.h
include/linux/kvm_host.h
include/linux/leds.h
include/linux/memcontrol.h
include/linux/micrel_phy.h
include/linux/mii.h
include/linux/mlx5/mlx5_ifc.h
include/linux/mm.h
include/linux/mm_types.h
include/linux/netdevice.h
include/linux/page-flags.h
include/linux/page_ext.h
include/linux/perf_event.h
include/linux/phy.h
include/linux/platform_data/dma-imx-sdma.h
include/linux/platform_device.h
include/linux/pm_qos.h
include/linux/sched.h
include/linux/security.h
include/linux/skbuff.h
include/linux/slab.h
include/linux/socket.h
include/linux/string.h
include/linux/sunrpc/bc_xprt.h
include/linux/sunrpc/xprtsock.h
include/linux/sysfs.h
include/linux/tcp.h
include/linux/tpm_eventlog.h
include/linux/uaccess.h
include/linux/virtio_vsock.h
include/linux/xarray.h
include/net/bonding.h
include/net/busy_poll.h
include/net/cfg80211.h
include/net/flow_dissector.h
include/net/fq.h
include/net/fq_impl.h
include/net/hwbm.h
include/net/ip.h
include/net/ip_vs.h
include/net/llc_conn.h
include/net/net_namespace.h
include/net/request_sock.h
include/net/sctp/sctp.h
include/net/sock.h
include/net/tcp.h
include/net/vxlan.h
include/rdma/ib_verbs.h
include/scsi/scsi_eh.h
include/sound/hda_register.h
include/sound/simple_card_utils.h
include/trace/events/btrfs.h
include/trace/events/rxrpc.h
include/trace/events/sock.h
include/uapi/drm/amdgpu_drm.h
include/uapi/linux/fuse.h
include/uapi/linux/nvme_ioctl.h
include/uapi/linux/pg.h
include/uapi/linux/sched.h
include/uapi/linux/serial_core.h
include/xen/xen-ops.h
kernel/bpf/core.c
kernel/bpf/devmap.c
kernel/bpf/syscall.c
kernel/cgroup/cpuset.c
kernel/dma/remap.c
kernel/events/core.c
kernel/events/uprobes.c
kernel/fork.c
kernel/freezer.c
kernel/gen_kheaders.sh
kernel/kthread.c
kernel/panic.c
kernel/power/main.c
kernel/power/qos.c
kernel/sched/core.c
kernel/sched/cputime.c
kernel/sched/fair.c
kernel/sched/membarrier.c
kernel/sched/topology.c
kernel/stop_machine.c
kernel/sysctl.c
kernel/time/hrtimer.c
kernel/time/posix-cpu-timers.c
kernel/time/sched_clock.c
kernel/time/tick-broadcast-hrtimer.c
kernel/trace/ftrace.c
kernel/trace/trace.c
kernel/trace/trace.h
kernel/trace/trace_dynevent.c
kernel/trace/trace_event_perf.c
kernel/trace/trace_events.c
kernel/trace/trace_events_hist.c
kernel/trace/trace_events_trigger.c
kernel/trace/trace_hwlat.c
kernel/trace/trace_kprobe.c
kernel/trace/trace_printk.c
kernel/trace/trace_stack.c
kernel/trace/trace_stat.c
kernel/trace/trace_uprobe.c
lib/dump_stack.c
lib/generic-radix-tree.c
lib/string.c
lib/strnlen_user.c
lib/test_meminit.c
lib/test_user_copy.c
lib/textsearch.c
lib/usercopy.c
lib/vdso/Kconfig
lib/vdso/gettimeofday.c
mm/backing-dev.c
mm/compaction.c
mm/filemap.c
mm/gup.c
mm/huge_memory.c
mm/hugetlb.c
mm/init-mm.c
mm/khugepaged.c
mm/kmemleak.c
mm/memblock.c
mm/memcontrol.c
mm/memory-failure.c
mm/memory_hotplug.c
mm/memremap.c
mm/mmu_notifier.c
mm/page_alloc.c
mm/page_ext.c
mm/page_owner.c
mm/rmap.c
mm/shmem.c
mm/shuffle.c
mm/slab.c
mm/slab.h
mm/slab_common.c
mm/slob.c
mm/slub.c
mm/sparse.c
mm/truncate.c
mm/vmpressure.c
mm/vmscan.c
mm/vmstat.c
mm/z3fold.c
net/8021q/vlan.c
net/8021q/vlan_dev.c
net/atm/common.c
net/batman-adv/bat_iv_ogm.c
net/batman-adv/bat_v_ogm.c
net/batman-adv/hard-interface.c
net/batman-adv/soft-interface.c
net/batman-adv/types.h
net/bluetooth/6lowpan.c
net/bluetooth/af_bluetooth.c
net/bridge/br_device.c
net/bridge/netfilter/nf_conntrack_bridge.c
net/caif/caif_socket.c
net/core/datagram.c
net/core/dev.c
net/core/dev_addr_lists.c
net/core/devlink.c
net/core/ethtool.c
net/core/filter.c
net/core/flow_dissector.c
net/core/lwt_bpf.c
net/core/net_namespace.c
net/core/request_sock.c
net/core/rtnetlink.c
net/core/skbuff.c
net/core/sock.c
net/dccp/ipv4.c
net/decnet/af_decnet.c
net/dsa/dsa2.c
net/dsa/master.c
net/dsa/slave.c
net/dsa/tag_sja1105.c
net/ieee802154/6lowpan/core.c
net/ipv4/datagram.c
net/ipv4/fib_frontend.c
net/ipv4/inet_connection_sock.c
net/ipv4/inet_diag.c
net/ipv4/inet_hashtables.c
net/ipv4/ip_gre.c
net/ipv4/ip_input.c
net/ipv4/ip_output.c
net/ipv4/ipmr.c
net/ipv4/netfilter/nf_dup_ipv4.c
net/ipv4/raw.c
net/ipv4/route.c
net/ipv4/tcp.c
net/ipv4/tcp_diag.c
net/ipv4/tcp_fastopen.c
net/ipv4/tcp_input.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_minisocks.c
net/ipv4/tcp_output.c
net/ipv4/tcp_timer.c
net/ipv4/udp.c
net/ipv6/addrconf.c
net/ipv6/addrconf_core.c
net/ipv6/inet6_hashtables.c
net/ipv6/ip6_gre.c
net/ipv6/ip6_input.c
net/ipv6/ip6_output.c
net/ipv6/netfilter.c
net/ipv6/netfilter/nf_dup_ipv6.c
net/ipv6/raw.c
net/ipv6/tcp_ipv6.c
net/ipv6/udp.c
net/l2tp/l2tp_core.c
net/l2tp/l2tp_eth.c
net/l2tp/l2tp_ip.c
net/l2tp/l2tp_ip6.c
net/llc/af_llc.c
net/llc/llc_c_ac.c
net/llc/llc_conn.c
net/llc/llc_if.c
net/llc/llc_s_ac.c
net/llc/llc_sap.c
net/mac80211/debugfs_netdev.c
net/mac80211/mlme.c
net/mac80211/rx.c
net/mac80211/scan.c
net/mac80211/util.c
net/netfilter/ipvs/ip_vs_app.c
net/netfilter/ipvs/ip_vs_ctl.c
net/netfilter/ipvs/ip_vs_pe.c
net/netfilter/ipvs/ip_vs_sched.c
net/netfilter/ipvs/ip_vs_sync.c
net/netfilter/ipvs/ip_vs_xmit.c
net/netfilter/nf_conntrack_core.c
net/netfilter/nf_flow_table_core.c
net/netfilter/nf_tables_offload.c
net/netfilter/nft_connlimit.c
net/netfilter/nft_payload.c
net/netrom/af_netrom.c
net/nfc/llcp_sock.c
net/openvswitch/actions.c
net/openvswitch/datapath.c
net/openvswitch/vport-internal_dev.c
net/packet/af_packet.c
net/phonet/socket.c
net/rds/ib.c
net/rose/af_rose.c
net/rxrpc/ar-internal.h
net/rxrpc/call_accept.c
net/rxrpc/call_object.c
net/rxrpc/conn_client.c
net/rxrpc/conn_object.c
net/rxrpc/conn_service.c
net/rxrpc/peer_event.c
net/rxrpc/peer_object.c
net/rxrpc/recvmsg.c
net/rxrpc/sendmsg.c
net/sched/act_api.c
net/sched/act_mirred.c
net/sched/act_mpls.c
net/sched/cls_api.c
net/sched/cls_bpf.c
net/sched/em_meta.c
net/sched/sch_api.c
net/sched/sch_cbq.c
net/sched/sch_cbs.c
net/sched/sch_dsmark.c
net/sched/sch_etf.c
net/sched/sch_generic.c
net/sched/sch_hhf.c
net/sched/sch_netem.c
net/sched/sch_sfb.c
net/sched/sch_sfq.c
net/sched/sch_taprio.c
net/sctp/diag.c
net/sctp/input.c
net/sctp/sm_make_chunk.c
net/sctp/socket.c
net/smc/af_smc.c
net/smc/smc_core.c
net/smc/smc_pnet.c
net/smc/smc_rx.c
net/sunrpc/backchannel_rqst.c
net/sunrpc/xprt.c
net/sunrpc/xprtrdma/backchannel.c
net/sunrpc/xprtsock.c
net/tipc/link.c
net/tipc/msg.c
net/tipc/socket.c
net/unix/af_unix.c
net/vmw_vsock/af_vsock.c
net/vmw_vsock/hyperv_transport.c
net/vmw_vsock/virtio_transport_common.c
net/wireless/chan.c
net/wireless/nl80211.c
net/wireless/reg.c
net/wireless/reg.h
net/wireless/scan.c
net/wireless/util.c
net/wireless/wext-compat.c
net/wireless/wext-sme.c
net/x25/x25_dev.c
net/xdp/xdp_umem.c
net/xdp/xsk.c
net/xfrm/xfrm_input.c
net/xfrm/xfrm_interface.c
net/xfrm/xfrm_output.c
net/xfrm/xfrm_policy.c
samples/bpf/asm_goto_workaround.h
samples/bpf/task_fd_query_user.c
scripts/Kbuild.include
scripts/Makefile.build
scripts/Makefile.lib
scripts/coccinelle/api/devm_platform_ioremap_resource.cocci [deleted file]
scripts/coccinelle/misc/add_namespace.cocci
scripts/gdb/linux/dmesg.py
scripts/gdb/linux/symbols.py
scripts/gdb/linux/utils.py
scripts/mod/modpost.c
scripts/mod/modpost.h
scripts/namespace.pl
scripts/nsdeps
scripts/recordmcount.h
scripts/setlocalversion
security/integrity/Makefile
security/lockdown/lockdown.c
security/selinux/ss/services.c
sound/core/timer.c
sound/firewire/bebob/bebob_stream.c
sound/hda/ext/hdac_ext_controller.c
sound/hda/hdac_controller.c
sound/pci/hda/hda_intel.c
sound/pci/hda/patch_hdmi.c
sound/pci/hda/patch_realtek.c
sound/soc/codecs/max98373.c
sound/soc/codecs/msm8916-wcd-digital.c
sound/soc/codecs/rt5651.c
sound/soc/codecs/rt5682.c
sound/soc/codecs/wm8994.c
sound/soc/codecs/wm_adsp.c
sound/soc/intel/boards/sof_rt5682.c
sound/soc/rockchip/rockchip_i2s.c
sound/soc/samsung/arndale_rt5631.c
sound/soc/sh/rcar/core.c
sound/soc/soc-pcm.c
sound/soc/soc-topology.c
sound/soc/sof/control.c
sound/soc/sof/intel/Kconfig
sound/soc/sof/intel/bdw.c
sound/soc/sof/intel/byt.c
sound/soc/sof/intel/hda-ctrl.c
sound/soc/sof/intel/hda-loader.c
sound/soc/sof/intel/hda-stream.c
sound/soc/sof/intel/hda.c
sound/soc/sof/intel/hda.h
sound/soc/sof/loader.c
sound/soc/sof/pcm.c
sound/soc/sof/topology.c
sound/soc/stm/stm32_sai_sub.c
sound/usb/pcm.c
sound/usb/quirks.c
sound/usb/validate.c
tools/arch/arm/include/uapi/asm/kvm.h
tools/arch/arm64/include/uapi/asm/kvm.h
tools/arch/s390/include/uapi/asm/kvm.h
tools/arch/x86/include/uapi/asm/svm.h
tools/arch/x86/include/uapi/asm/vmx.h
tools/bpf/Makefile
tools/gpio/Makefile
tools/include/uapi/asm-generic/mman-common.h
tools/include/uapi/drm/i915_drm.h
tools/include/uapi/linux/fs.h
tools/include/uapi/linux/fscrypt.h [new file with mode: 0644]
tools/include/uapi/linux/kvm.h
tools/include/uapi/linux/sched.h
tools/include/uapi/linux/usbdevice_fs.h
tools/lib/bpf/Makefile
tools/lib/bpf/libbpf_internal.h
tools/lib/bpf/xsk.c
tools/lib/subcmd/Makefile
tools/perf/Documentation/asciidoc.conf
tools/perf/Documentation/jitdump-specification.txt
tools/perf/arch/arm/annotate/instructions.c
tools/perf/arch/arm64/annotate/instructions.c
tools/perf/arch/powerpc/util/header.c
tools/perf/arch/s390/annotate/instructions.c
tools/perf/arch/s390/util/header.c
tools/perf/arch/x86/annotate/instructions.c
tools/perf/arch/x86/util/header.c
tools/perf/builtin-c2c.c
tools/perf/builtin-kmem.c
tools/perf/builtin-kvm.c
tools/perf/builtin-script.c
tools/perf/check-headers.sh
tools/perf/jvmti/Build
tools/perf/pmu-events/arch/s390/cf_m8561/basic.json [deleted file]
tools/perf/pmu-events/arch/s390/cf_m8561/crypto.json [deleted file]
tools/perf/pmu-events/arch/s390/cf_m8561/crypto6.json [deleted file]
tools/perf/pmu-events/arch/s390/cf_m8561/extended.json [deleted file]
tools/perf/pmu-events/arch/s390/cf_z15/basic.json [new file with mode: 0644]
tools/perf/pmu-events/arch/s390/cf_z15/crypto.json [new file with mode: 0644]
tools/perf/pmu-events/arch/s390/cf_z15/crypto6.json [new file with mode: 0644]
tools/perf/pmu-events/arch/s390/cf_z15/extended.json [new file with mode: 0644]
tools/perf/pmu-events/arch/s390/cf_z15/transaction.json [new file with mode: 0644]
tools/perf/pmu-events/arch/s390/mapfile.csv
tools/perf/pmu-events/jevents.c
tools/perf/tests/perf-hooks.c
tools/perf/util/annotate.c
tools/perf/util/annotate.h
tools/perf/util/copyfile.c
tools/perf/util/evlist.c
tools/perf/util/evsel.c
tools/perf/util/header.c
tools/perf/util/jitdump.c
tools/perf/util/llvm-utils.c
tools/perf/util/map.c
tools/perf/util/python.c
tools/perf/util/util.c
tools/testing/selftests/Makefile
tools/testing/selftests/bpf/prog_tests/sockopt_inherit.c
tools/testing/selftests/bpf/prog_tests/tcp_rtt.c
tools/testing/selftests/bpf/test_flow_dissector.sh
tools/testing/selftests/bpf/test_lwt_ip_encap.sh
tools/testing/selftests/bpf/test_offload.py
tools/testing/selftests/bpf/test_tc_edt.sh
tools/testing/selftests/kselftest/runner.sh
tools/testing/selftests/kselftest_install.sh
tools/testing/selftests/kvm/.gitignore
tools/testing/selftests/kvm/Makefile
tools/testing/selftests/kvm/include/x86_64/processor.h
tools/testing/selftests/kvm/include/x86_64/vmx.h
tools/testing/selftests/kvm/lib/kvm_util.c
tools/testing/selftests/kvm/lib/kvm_util_internal.h
tools/testing/selftests/kvm/lib/x86_64/processor.c
tools/testing/selftests/kvm/lib/x86_64/vmx.c
tools/testing/selftests/kvm/x86_64/sync_regs_test.c
tools/testing/selftests/kvm/x86_64/vmx_close_while_nested_test.c
tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c [new file with mode: 0644]
tools/testing/selftests/kvm/x86_64/vmx_set_nested_state_test.c
tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c
tools/testing/selftests/net/.gitignore
tools/testing/selftests/net/fib_tests.sh
tools/testing/selftests/net/l2tp.sh [changed mode: 0644->0755]
tools/testing/selftests/net/reuseport_dualstack.c
tools/testing/selftests/net/udpgso.c
tools/testing/selftests/pidfd/Makefile
tools/testing/selftests/powerpc/mm/tlbie_test.c
tools/testing/selftests/rtc/settings [new file with mode: 0644]
tools/testing/selftests/vm/gup_benchmark.c
tools/testing/selftests/watchdog/watchdog-test.c
tools/usb/usbip/libsrc/usbip_device_driver.c
tools/virtio/crypto/hash.h [new file with mode: 0644]
tools/virtio/linux/dma-mapping.h
tools/virtio/xen/xen.h [new file with mode: 0644]
usr/include/Makefile
virt/kvm/arm/pmu.c
virt/kvm/arm/vgic/trace.h
virt/kvm/kvm_main.c

diff --git a/.mailmap b/.mailmap
index edcac87e76c8c7fe9020943bdce012f7e47dc962..83d7e750c2fc89e60986049ec475d93eb1f7bb8b 100644 (file)
--- a/.mailmap
+++ b/.mailmap
@@ -196,7 +196,8 @@ Oleksij Rempel <linux@rempel-privat.de> <o.rempel@pengutronix.de>
 Oleksij Rempel <linux@rempel-privat.de> <ore@pengutronix.de>
 Paolo 'Blaisorblade' Giarrusso <blaisorblade@yahoo.it>
 Patrick Mochel <mochel@digitalimplant.org>
-Paul Burton <paul.burton@mips.com> <paul.burton@imgtec.com>
+Paul Burton <paulburton@kernel.org> <paul.burton@imgtec.com>
+Paul Burton <paulburton@kernel.org> <paul.burton@mips.com>
 Peter A Jonsson <pj@ludd.ltu.se>
 Peter Oruba <peter@oruba.de>
 Peter Oruba <peter.oruba@amd.com>
@@ -229,6 +230,7 @@ Shuah Khan <shuah@kernel.org> <shuahkhan@gmail.com>
 Shuah Khan <shuah@kernel.org> <shuah.khan@hp.com>
 Shuah Khan <shuah@kernel.org> <shuahkh@osg.samsung.com>
 Shuah Khan <shuah@kernel.org> <shuah.kh@samsung.com>
+Simon Arlott <simon@octiron.net> <simon@fire.lp0.eu>
 Simon Kelley <simon@thekelleys.org.uk>
 Stéphane Witzmann <stephane.witzmann@ubpmes.univ-bpclermont.fr>
 Stephen Hemminger <shemminger@osdl.org>
diff --git a/CREDITS b/CREDITS
index 8b67a85844b55d88b27ea761ec0711c871ca6fa0..031605d46b4d5cc163114e9558225378e30303d4 100644 (file)
--- a/CREDITS
+++ b/CREDITS
@@ -1637,6 +1637,10 @@ S: Panoramastrasse 18
 S: D-69126 Heidelberg
 S: Germany
 
+N: Simon Horman
+M: horms@verge.net.au
+D: Renesas ARM/ARM64 SoC maintainer
+
 N: Christopher Horn
 E: chorn@warwick.net
 D: Miscellaneous sysctl hacks
diff --git a/Documentation/admin-guide/cgroup-v2.rst b/Documentation/admin-guide/cgroup-v2.rst
index 0fa8c0e615c2853587e2a87fa396fb5dbac321bb..5361ebec33612d62e8078e630d35e517db2a73b0 100644 (file)
@@ -615,8 +615,8 @@ on an IO device and is an example of this type.
 Protections
 -----------
 
-A cgroup is protected to be allocated upto the configured amount of
-the resource if the usages of all its ancestors are under their
+A cgroup is protected upto the configured amount of the resource
+as long as the usages of all its ancestors are under their
 protected levels.  Protections can be hard guarantees or best effort
 soft boundaries.  Protections can also be over-committed in which case
 only upto the amount available to the parent is protected among
@@ -1096,7 +1096,10 @@ PAGE_SIZE multiple when read back.
        is within its effective min boundary, the cgroup's memory
        won't be reclaimed under any conditions. If there is no
        unprotected reclaimable memory available, OOM killer
-       is invoked.
+       is invoked. Above the effective min boundary (or
+       effective low boundary if it is higher), pages are reclaimed
+       proportionally to the overage, reducing reclaim pressure for
+       smaller overages.
 
        Effective min boundary is limited by memory.min values of
        all ancestor cgroups. If there is memory.min overcommitment
@@ -1118,7 +1121,10 @@ PAGE_SIZE multiple when read back.
        Best-effort memory protection.  If the memory usage of a
        cgroup is within its effective low boundary, the cgroup's
        memory won't be reclaimed unless memory can be reclaimed
-       from unprotected cgroups.
+       from unprotected cgroups.  Above the effective low boundary (or
+       effective min boundary if it is higher), pages are reclaimed
+       proportionally to the overage, reducing reclaim pressure for
+       smaller overages.
 
        Effective low boundary is limited by memory.low values of
        all ancestor cgroups. If there is memory.low overcommitment
@@ -2482,8 +2488,10 @@ system performance due to overreclaim, to the point where the feature
 becomes self-defeating.
 
 The memory.low boundary on the other hand is a top-down allocated
-reserve.  A cgroup enjoys reclaim protection when it's within its low,
-which makes delegation of subtrees possible.
+reserve.  A cgroup enjoys reclaim protection when it's within its
+effective low, which makes delegation of subtrees possible. It also
+enjoys having reclaim pressure proportional to its overage when
+above its effective low.
 
 The original high boundary, the hard limit, is defined as a strict
 limit that can not budge, even if the OOM killer has to be called.
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index c7ac2f3ac99fd29bef9db55d03f01c20c9912ad9..a84a83f8881e08bf0d0d598bb01814be0ec18647 100644 (file)
                                the unplug protocol
                        never -- do not unplug even if version check succeeds
 
+       xen_legacy_crash        [X86,XEN]
+                       Crash from Xen panic notifier, without executing late
+                       panic() code such as dumping handler.
+
        xen_nopvspin    [X86,XEN]
                        Disables the ticketlock slowpath using Xen PV
                        optimizations.
diff --git a/Documentation/arm64/memory.rst b/Documentation/arm64/memory.rst
index b040909e45f81d7ab9108cfd1286338fdd0d1471..02e02175e6f56a75da568cd0112bf71769b8f938 100644 (file)
@@ -154,11 +154,18 @@ return virtual addresses to userspace from a 48-bit range.
 
 Software can "opt-in" to receiving VAs from a 52-bit space by
 specifying an mmap hint parameter that is larger than 48-bit.
+
 For example:
-    maybe_high_address = mmap(~0UL, size, prot, flags,...);
+
+.. code-block:: c
+
+   maybe_high_address = mmap(~0UL, size, prot, flags,...);
 
 It is also possible to build a debug kernel that returns addresses
 from a 52-bit space by enabling the following kernel config options:
+
+.. code-block:: sh
+
    CONFIG_EXPERT=y && CONFIG_ARM64_FORCE_52BIT=y
 
 Note that this option is only intended for debugging applications
diff --git a/Documentation/arm64/silicon-errata.rst b/Documentation/arm64/silicon-errata.rst
index 17ea3fecddaa50ac1a1e8af59f222105622b1720..5a09661330fccfceedcb4e5879535d30b51697d3 100644 (file)
@@ -91,6 +91,11 @@ stable kernels.
 | ARM            | MMU-500         | #841119,826419  | N/A                         |
 +----------------+-----------------+-----------------+-----------------------------+
 +----------------+-----------------+-----------------+-----------------------------+
+| Broadcom       | Brahma-B53      | N/A             | ARM64_ERRATUM_845719        |
++----------------+-----------------+-----------------+-----------------------------+
+| Broadcom       | Brahma-B53      | N/A             | ARM64_ERRATUM_843419        |
++----------------+-----------------+-----------------+-----------------------------+
++----------------+-----------------+-----------------+-----------------------------+
 | Cavium         | ThunderX ITS    | #22375,24313    | CAVIUM_ERRATUM_22375        |
 +----------------+-----------------+-----------------+-----------------------------+
 | Cavium         | ThunderX ITS    | #23144          | CAVIUM_ERRATUM_23144        |
@@ -107,6 +112,8 @@ stable kernels.
 +----------------+-----------------+-----------------+-----------------------------+
 | Cavium         | ThunderX2 SMMUv3| #126            | N/A                         |
 +----------------+-----------------+-----------------+-----------------------------+
+| Cavium         | ThunderX2 Core  | #219            | CAVIUM_TX2_ERRATUM_219      |
++----------------+-----------------+-----------------+-----------------------------+
 +----------------+-----------------+-----------------+-----------------------------+
 | Freescale/NXP  | LS2080A/LS1043A | A-008585        | FSL_ERRATUM_A008585         |
 +----------------+-----------------+-----------------+-----------------------------+
@@ -124,7 +131,7 @@ stable kernels.
 +----------------+-----------------+-----------------+-----------------------------+
 | Qualcomm Tech. | Kryo/Falkor v1  | E1003           | QCOM_FALKOR_ERRATUM_1003    |
 +----------------+-----------------+-----------------+-----------------------------+
-| Qualcomm Tech. | Falkor v1       | E1009           | QCOM_FALKOR_ERRATUM_1009    |
+| Qualcomm Tech. | Kryo/Falkor v1  | E1009           | QCOM_FALKOR_ERRATUM_1009    |
 +----------------+-----------------+-----------------+-----------------------------+
 | Qualcomm Tech. | QDF2400 ITS     | E0065           | QCOM_QDF2400_ERRATUM_0065   |
 +----------------+-----------------+-----------------+-----------------------------+
index fa16a0538dcbf4f091cb45bb72949219ed92e4f4..ab0eae1c153a42e5ebfb3b17f0e1046ab956855b 100644 (file)
@@ -38,6 +38,7 @@ Core utilities
    protection-keys
    ../RCU/index
    gcc-plugins
+   symbol-namespaces
 
 
 Interfaces for kernel debugging
index 7744aa3bf2e0bc7aa989a919382651499ca97a1b..939e3dfc86e914bcd6d3b0c881cdb68b3dc6ffd7 100644 (file)
@@ -98,6 +98,10 @@ limited. The actual limit depends on the hardware and the kernel
 configuration, but it is a good practice to use `kmalloc` for objects
 smaller than page size.
 
+The address of a chunk allocated with `kmalloc` is aligned to at least
+ARCH_KMALLOC_MINALIGN bytes.  For sizes which are a power of two, the
+alignment is also guaranteed to be at least the respective size.
+
 For large allocations you can use :c:func:`vmalloc` and
 :c:func:`vzalloc`, or directly request pages from the page
 allocator. The memory allocated by `vmalloc` and related functions is
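
As an illustration of the alignment guarantee added above (a minimal in-kernel
sketch; the function name is hypothetical), a power-of-two kmalloc() size
implies at least that much alignment:

    #include <linux/kernel.h>
    #include <linux/slab.h>

    /* Sketch: 1024 is a power of two, so the returned pointer is at least
     * 1024-byte aligned (and never less than ARCH_KMALLOC_MINALIGN). */
    static void *alloc_descriptor_ring(void)
    {
            void *ring = kmalloc(1024, GFP_KERNEL);

            if (ring)
                    WARN_ON(!IS_ALIGNED((unsigned long)ring, 1024));
            return ring;
    }
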
diff --git a/Documentation/core-api/symbol-namespaces.rst b/Documentation/core-api/symbol-namespaces.rst
new file mode 100644 (file)
index 0000000..982ed7b
--- /dev/null
@@ -0,0 +1,154 @@
+=================
+Symbol Namespaces
+=================
+
+The following document describes how to use Symbol Namespaces to structure the
+export surface of in-kernel symbols exported through the family of
+EXPORT_SYMBOL() macros.
+
+.. Table of Contents
+
+       === 1 Introduction
+       === 2 How to define Symbol Namespaces
+          --- 2.1 Using the EXPORT_SYMBOL macros
+          --- 2.2 Using the DEFAULT_SYMBOL_NAMESPACE define
+       === 3 How to use Symbols exported in Namespaces
+       === 4 Loading Modules that use namespaced Symbols
+       === 5 Automatically creating MODULE_IMPORT_NS statements
+
+1. Introduction
+===============
+
+Symbol Namespaces have been introduced as a means to structure the export
+surface of the in-kernel API. They allow subsystem maintainers to partition
+their exported symbols into separate namespaces. That is useful for
+documentation purposes (think of the SUBSYSTEM_DEBUG namespace) as well as for
+limiting the availability of a set of symbols for use in other parts of the
+kernel. As of today, modules that make use of symbols exported into namespaces
+are required to import the namespace. Otherwise the kernel will, depending on
+its configuration, reject loading the module or warn about a missing import.
+
+2. How to define Symbol Namespaces
+==================================
+
+Symbols can be exported into a namespace using different methods. All of them
+change the way EXPORT_SYMBOL and friends are instrumented to create ksymtab
+entries.
+
+2.1 Using the EXPORT_SYMBOL macros
+==================================
+
+In addition to the macros EXPORT_SYMBOL() and EXPORT_SYMBOL_GPL(), that allow
+exporting of kernel symbols to the kernel symbol table, variants of these are
+available to export symbols into a certain namespace: EXPORT_SYMBOL_NS() and
+EXPORT_SYMBOL_NS_GPL(). They take one additional argument: the namespace.
+Please note that, due to macro expansion, that argument needs to be a
+preprocessor symbol. E.g. to export the symbol `usb_stor_suspend` into the
+namespace `USB_STORAGE`, use::
+
+       EXPORT_SYMBOL_NS(usb_stor_suspend, USB_STORAGE);
+
+The corresponding ksymtab entry struct `kernel_symbol` will have the member
+`namespace` set accordingly. For a symbol exported without a namespace the
+member is `NULL`. There is no default namespace if none is defined. `modpost`
+and kernel/module.c make use of the namespace at build time or module load
+time, respectively.
+
+2.2 Using the DEFAULT_SYMBOL_NAMESPACE define
+=============================================
+
+Defining namespaces for all symbols of a subsystem can be very verbose and may
+become hard to maintain. Therefore a default define (DEFAULT_SYMBOL_NAMESPACE)
+is provided that, if set, becomes the default for all EXPORT_SYMBOL() and
+EXPORT_SYMBOL_GPL() macro expansions that do not specify a namespace.
+
+There are multiple ways of specifying this define; which one to use depends on
+the subsystem and the maintainer's preference. The first option
+is to define the default namespace in the `Makefile` of the subsystem. E.g. to
+export all symbols defined in usb-common into the namespace USB_COMMON, add a
+line like this to drivers/usb/common/Makefile::
+
+       ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE=USB_COMMON
+
+That will affect all EXPORT_SYMBOL() and EXPORT_SYMBOL_GPL() statements. A
+symbol exported with EXPORT_SYMBOL_NS() while this definition is present will
+still be exported into the namespace that is passed as the namespace argument,
+as this argument takes precedence over a default symbol namespace.
+
+A second option is to define the default namespace directly in the compilation
+unit as a preprocessor statement. The above example would then read::
+
+       #undef  DEFAULT_SYMBOL_NAMESPACE
+       #define DEFAULT_SYMBOL_NAMESPACE USB_COMMON
+
+within the corresponding compilation unit before any EXPORT_SYMBOL macro is
+used.
+
+3. How to use Symbols exported in Namespaces
+============================================
+
+In order to use symbols that are exported into namespaces, kernel modules need
+to explicitly import these namespaces. Otherwise the kernel might refuse to
+load the module. The module code is required to use the macro MODULE_IMPORT_NS
+for the namespaces it uses symbols from. E.g. a module using the
+usb_stor_suspend symbol from above needs to import the namespace USB_STORAGE
+using a statement like::
+
+       MODULE_IMPORT_NS(USB_STORAGE);
+
+This will create a `modinfo` tag in the module for each imported namespace.
+This has the side effect that the imported namespaces of a module can be
+inspected with modinfo::
+
+       $ modinfo drivers/usb/storage/ums-karma.ko
+       [...]
+       import_ns:      USB_STORAGE
+       [...]
+
+
+It is advisable to add the MODULE_IMPORT_NS() statement close to other module
+metadata definitions like MODULE_AUTHOR() or MODULE_LICENSE(). Refer to section
+5 for a way to create missing import statements automatically.
+
+4. Loading Modules that use namespaced Symbols
+==============================================
+
+At module loading time (e.g. `insmod`), the kernel will check each symbol
+referenced from the module for its availability and whether the namespace it
+might be exported to has been imported by the module. The default behaviour of
+the kernel is to reject loading modules that don't specify sufficient imports.
+An error will be logged and loading will fail with EINVAL. In order to
+allow loading of modules that don't satisfy this precondition, a configuration
+option is available: Setting MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS=y will
+enable loading regardless, but will emit a warning.
+
+5. Automatically creating MODULE_IMPORT_NS statements
+=====================================================
+
+Missing namespace imports can easily be detected at build time. In fact,
+modpost will emit a warning if a module uses a symbol from a namespace
+without importing it.
+MODULE_IMPORT_NS() statements will usually be added at a definite location
+(along with other module metadata). To make the life of module authors (and
+subsystem maintainers) easier, a script and make target are available to fix
+up missing imports. Fixing missing imports can be done with::
+
+       $ make nsdeps
+
+A typical scenario for module authors would be::
+
+       - write code that depends on a symbol from a not imported namespace
+       - `make`
+       - notice the warning of modpost telling about a missing import
+       - run `make nsdeps` to add the import to the correct code location
+
+For subsystem maintainers introducing a namespace, the steps are very similar.
+Again, `make nsdeps` will eventually add the missing namespace imports for
+in-tree modules::
+
+       - move or add symbols to a namespace (e.g. with EXPORT_SYMBOL_NS())
+       - `make` (preferably with an allmodconfig to cover all in-kernel
+         modules)
+       - notice the warning of modpost telling about a missing import
+       - run `make nsdeps` to add the import to the correct code location
+
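
Putting the pieces above together, a minimal sketch of an exporter and a
consumer might look as follows (the FOO namespace, the foo_do_work() symbol
and the file names are hypothetical):

    /* drivers/foo/foo-core.c -- exports a symbol into the FOO namespace */
    #include <linux/export.h>
    #include <linux/module.h>

    int foo_do_work(void)
    {
            return 0;
    }
    EXPORT_SYMBOL_NS_GPL(foo_do_work, FOO);

    MODULE_LICENSE("GPL");

    /* drivers/foo/foo-user.c -- uses the symbol, so it imports the namespace */
    #include <linux/module.h>

    int foo_do_work(void);        /* normally declared in a shared header */

    static int __init foo_user_init(void)
    {
            return foo_do_work();
    }
    module_init(foo_user_init);

    MODULE_IMPORT_NS(FOO);
    MODULE_LICENSE("GPL");
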
index b72d07d70239c421e683c9df06411f728e114a9e..525296121d8953add4af9e7144a2eb2d6ba27b1e 100644 (file)
@@ -41,6 +41,9 @@ smaller binary while the latter is 1.1 - 2 times faster.
 Both KASAN modes work with both SLUB and SLAB memory allocators.
 For better bug detection and nicer reporting, enable CONFIG_STACKTRACE.
 
+To augment reports with the last allocation and freeing stacks of the physical
+page, it is also recommended to enable CONFIG_PAGE_OWNER and boot with
+page_owner=on.
+
 To disable instrumentation for specific files or directories, add a line
 similar to the following to the respective kernel Makefile:
 
index 25604904fa6ed51292b02878691966716a68b140..ecdfdc9d4b0320f9ea4fb51974bfd5e35dfa36e0 100644 (file)
@@ -89,6 +89,22 @@ To build, save output files in a separate directory with KBUILD_OUTPUT ::
 
   $ export KBUILD_OUTPUT=/tmp/kselftest; make TARGETS="size timers" kselftest
 
+Additionally, you can use the "SKIP_TARGETS" variable on the make command
+line to specify one or more targets to exclude from the TARGETS list.
+
+To run all tests but a single subsystem::
+
+  $ make -C tools/testing/selftests SKIP_TARGETS=ptrace run_tests
+
+You can specify multiple tests to skip::
+
+  $ make SKIP_TARGETS="size timers" kselftest
+
+You can also specify a restricted list of tests to run together with a
+dedicated skiplist::
+
+  $ make TARGETS="bpf breakpoints size timers" SKIP_TARGETS=bpf kselftest
+
 See the top-level tools/testing/selftests/Makefile for the list of all
 possible targets.
 
index c82c5e57d44c828c979eddd9ec43eaaf4e5fd53b..9c7e70335ac0f50d1c1678441a5a560491812fba 100644 (file)
@@ -496,12 +496,12 @@ properties:
 
       - description: Theobroma Systems RK3368-uQ7 with Haikou baseboard
         items:
-          - const: tsd,rk3368-uq7-haikou
+          - const: tsd,rk3368-lion-haikou
           - const: rockchip,rk3368
 
       - description: Theobroma Systems RK3399-Q7 with Haikou baseboard
         items:
-          - const: tsd,rk3399-q7-haikou
+          - const: tsd,rk3399-puma-haikou
           - const: rockchip,rk3399
 
       - description: Tronsmart Orion R68 Meta
index 3248595dc93c3a44e9efbf13d3fee3e19da503bb..f04870d84542299014ca338cbe3cb87e0053a0ae 100644 (file)
@@ -85,4 +85,5 @@ examples:
                         <&pd IMX_SC_R_DSP_RAM>;
         mbox-names = "txdb0", "txdb1", "rxdb0", "rxdb1";
         mboxes = <&lsio_mu13 2 0>, <&lsio_mu13 2 1>, <&lsio_mu13 3 0>, <&lsio_mu13 3 1>;
+        memory-region = <&dsp_reserved>;
     };
index 676ec42e143819665215bf79ffad2b2c148690f8..567a33a83dce8b210d69d0291a0eb5bf2a15309d 100644 (file)
@@ -43,13 +43,9 @@ properties:
 
   dvdd-supply:
     description: DVdd voltage supply
-    items:
-      - const: dvdd
 
   avdd-supply:
     description: AVdd voltage supply
-    items:
-      - const: avdd
 
   adi,rejection-60-Hz-enable:
     description: |
@@ -99,6 +95,9 @@ required:
 examples:
   - |
     spi0 {
+      #address-cells = <1>;
+      #size-cells = <0>;
+
       adc@0 {
         compatible = "adi,ad7192";
         reg = <0>;
index f4c5d34c41115878c335503185774c4c1b580aaf..7079d44bf3bad8afbaecd4a3f74760ff6aeb4b37 100644 (file)
@@ -1,8 +1,11 @@
 * Advanced Interrupt Controller (AIC)
 
 Required properties:
-- compatible: Should be "atmel,<chip>-aic"
-  <chip> can be "at91rm9200", "sama5d2", "sama5d3" or "sama5d4"
+- compatible: Should be:
+    - "atmel,<chip>-aic" where  <chip> can be "at91rm9200", "sama5d2",
+      "sama5d3" or "sama5d4"
+    - "microchip,<chip>-aic" where <chip> can be "sam9x60"
+
 - interrupt-controller: Identifies the node as an interrupt controller.
 - #interrupt-cells: The number of cells to define the interrupts. It should be 3.
   The first cell is the IRQ number (aka "Peripheral IDentifier" on datasheet).
index 27f38eed389e4830b8fb37794ac1fc89f6c962a4..d3e423fcb6c2ee643529f67527eda8ba88c3d32c 100644 (file)
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
 %YAML 1.2
 ---
-$id: http://devicetree.org/schemas/arm/allwinner,sun4i-a10-csi.yaml#
+$id: http://devicetree.org/schemas/media/allwinner,sun4i-a10-csi.yaml#
 $schema: http://devicetree.org/meta-schemas/core.yaml#
 
 title: Allwinner A10 CMOS Sensor Interface (CSI) Device Tree Bindings
@@ -27,14 +27,12 @@ properties:
   clocks:
     items:
       - description: The CSI interface clock
-      - description: The CSI module clock
       - description: The CSI ISP clock
       - description: The CSI DRAM clock
 
   clock-names:
     items:
       - const: bus
-      - const: mod
       - const: isp
       - const: ram
 
@@ -89,9 +87,8 @@ examples:
         compatible = "allwinner,sun7i-a20-csi0";
         reg = <0x01c09000 0x1000>;
         interrupts = <GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>;
-        clocks = <&ccu CLK_AHB_CSI0>, <&ccu CLK_CSI0>,
-                 <&ccu CLK_CSI_SCLK>, <&ccu CLK_DRAM_CSI0>;
-        clock-names = "bus", "mod", "isp", "ram";
+        clocks = <&ccu CLK_AHB_CSI0>, <&ccu CLK_CSI_SCLK>, <&ccu CLK_DRAM_CSI0>;
+        clock-names = "bus", "isp", "ram";
         resets = <&ccu RST_CSI0>;
 
         port {
index 3d5c154fd23024169dc9664d3b8679ce2fbe3e54..9054555e6608c0f3f6de1c81528b3516028e32ac 100644 (file)
@@ -73,7 +73,6 @@ properties:
           - rc-genius-tvgo-a11mce
           - rc-gotview7135
           - rc-hauppauge
-          - rc-hauppauge
           - rc-hisi-poplar
           - rc-hisi-tv-demo
           - rc-imon-mce
index 8a56a8526cef7e2a45d2935bd5d5dbbc7ca35dc6..a97482179cf5a37c0dfc001fbb71b2f74496ee76 100644 (file)
@@ -37,7 +37,7 @@ properties:
       - description: exclusive PHY reset line
       - description: shared reset line between the PCIe PHY and PCIe controller
 
-  resets-names:
+  reset-names:
     items:
       - const: phy
       - const: pcie
index f83d888176cc1316a7ea95ab89a71e85984fbe74..064b7dfc4252c51cebfb1e124b56a41f364eb89b 100644 (file)
@@ -33,13 +33,13 @@ patternProperties:
           allOf:
             - $ref: "/schemas/types.yaml#/definitions/string"
             - enum: [ ADC0, ADC1, ADC10, ADC11, ADC12, ADC13, ADC14, ADC15,
-              ADC2, ADC3, ADC4, ADC5, ADC6, ADC7, ADC8, ADC9, BMCINT, ESPI,
-              ESPIALT, FSI1, FSI2, FWSPIABR, FWSPID, FWSPIWP, GPIT0, GPIT1,
-              GPIT2, GPIT3, GPIT4, GPIT5, GPIT6, GPIT7, GPIU0, GPIU1, GPIU2,
-              GPIU3, GPIU4, GPIU5, GPIU6, GPIU7, I2C1, I2C10, I2C11, I2C12,
-              I2C13, I2C14, I2C15, I2C16, I2C2, I2C3, I2C4, I2C5, I2C6, I2C7,
-              I2C8, I2C9, I3C3, I3C4, I3C5, I3C6, JTAGM, LHPD, LHSIRQ, LPC,
-              LPCHC, LPCPD, LPCPME, LPCSMI, LSIRQ, MACLINK1, MACLINK2,
+              ADC2, ADC3, ADC4, ADC5, ADC6, ADC7, ADC8, ADC9, BMCINT, EMMC,
+              ESPI, ESPIALT, FSI1, FSI2, FWSPIABR, FWSPID, FWSPIWP, GPIT0,
+              GPIT1, GPIT2, GPIT3, GPIT4, GPIT5, GPIT6, GPIT7, GPIU0, GPIU1,
+              GPIU2, GPIU3, GPIU4, GPIU5, GPIU6, GPIU7, I2C1, I2C10, I2C11,
+              I2C12, I2C13, I2C14, I2C15, I2C16, I2C2, I2C3, I2C4, I2C5, I2C6,
+              I2C7, I2C8, I2C9, I3C3, I3C4, I3C5, I3C6, JTAGM, LHPD, LHSIRQ,
+              LPC, LPCHC, LPCPD, LPCPME, LPCSMI, LSIRQ, MACLINK1, MACLINK2,
               MACLINK3, MACLINK4, MDIO1, MDIO2, MDIO3, MDIO4, NCTS1, NCTS2,
               NCTS3, NCTS4, NDCD1, NDCD2, NDCD3, NDCD4, NDSR1, NDSR2, NDSR3,
               NDSR4, NDTR1, NDTR2, NDTR3, NDTR4, NRI1, NRI2, NRI3, NRI4, NRTS1,
@@ -48,47 +48,45 @@ patternProperties:
               PWM8, PWM9, RGMII1, RGMII2, RGMII3, RGMII4, RMII1, RMII2, RMII3,
               RMII4, RXD1, RXD2, RXD3, RXD4, SALT1, SALT10, SALT11, SALT12,
               SALT13, SALT14, SALT15, SALT16, SALT2, SALT3, SALT4, SALT5,
-              SALT6, SALT7, SALT8, SALT9, SD1, SD2, SD3, SD3DAT4, SD3DAT5,
-              SD3DAT6, SD3DAT7, SGPM1, SGPS1, SIOONCTRL, SIOPBI, SIOPBO,
-              SIOPWREQ, SIOPWRGD, SIOS3, SIOS5, SIOSCI, SPI1, SPI1ABR, SPI1CS1,
-              SPI1WP, SPI2, SPI2CS1, SPI2CS2, TACH0, TACH1, TACH10, TACH11,
-              TACH12, TACH13, TACH14, TACH15, TACH2, TACH3, TACH4, TACH5,
-              TACH6, TACH7, TACH8, TACH9, THRU0, THRU1, THRU2, THRU3, TXD1,
-              TXD2, TXD3, TXD4, UART10, UART11, UART12, UART13, UART6, UART7,
-              UART8, UART9, VB, VGAHS, VGAVS, WDTRST1, WDTRST2, WDTRST3,
-              WDTRST4, ]
+              SALT6, SALT7, SALT8, SALT9, SD1, SD2, SGPM1, SGPS1, SIOONCTRL,
+              SIOPBI, SIOPBO, SIOPWREQ, SIOPWRGD, SIOS3, SIOS5, SIOSCI, SPI1,
+              SPI1ABR, SPI1CS1, SPI1WP, SPI2, SPI2CS1, SPI2CS2, TACH0, TACH1,
+              TACH10, TACH11, TACH12, TACH13, TACH14, TACH15, TACH2, TACH3,
+              TACH4, TACH5, TACH6, TACH7, TACH8, TACH9, THRU0, THRU1, THRU2,
+              THRU3, TXD1, TXD2, TXD3, TXD4, UART10, UART11, UART12, UART13,
+              UART6, UART7, UART8, UART9, VB, VGAHS, VGAVS, WDTRST1, WDTRST2,
+              WDTRST3, WDTRST4, ]
         groups:
           allOf:
             - $ref: "/schemas/types.yaml#/definitions/string"
             - enum: [ ADC0, ADC1, ADC10, ADC11, ADC12, ADC13, ADC14, ADC15,
-              ADC2, ADC3, ADC4, ADC5, ADC6, ADC7, ADC8, ADC9, BMCINT, ESPI,
-              ESPIALT, FSI1, FSI2, FWSPIABR, FWSPID, FWQSPID, FWSPIWP, GPIT0,
-              GPIT1, GPIT2, GPIT3, GPIT4, GPIT5, GPIT6, GPIT7, GPIU0, GPIU1,
-              GPIU2, GPIU3, GPIU4, GPIU5, GPIU6, GPIU7, HVI3C3, HVI3C4, I2C1,
-              I2C10, I2C11, I2C12, I2C13, I2C14, I2C15, I2C16, I2C2, I2C3,
-              I2C4, I2C5, I2C6, I2C7, I2C8, I2C9, I3C3, I3C4, I3C5, I3C6,
-              JTAGM, LHPD, LHSIRQ, LPC, LPCHC, LPCPD, LPCPME, LPCSMI, LSIRQ,
-              MACLINK1, MACLINK2, MACLINK3, MACLINK4, MDIO1, MDIO2, MDIO3,
-              MDIO4, NCTS1, NCTS2, NCTS3, NCTS4, NDCD1, NDCD2, NDCD3, NDCD4,
-              NDSR1, NDSR2, NDSR3, NDSR4, NDTR1, NDTR2, NDTR3, NDTR4, NRI1,
-              NRI2, NRI3, NRI4, NRTS1, NRTS2, NRTS3, NRTS4, OSCCLK, PEWAKE,
-              PWM0, PWM1, PWM10G0, PWM10G1, PWM11G0, PWM11G1, PWM12G0, PWM12G1,
-              PWM13G0, PWM13G1, PWM14G0, PWM14G1, PWM15G0, PWM15G1, PWM2, PWM3,
-              PWM4, PWM5, PWM6, PWM7, PWM8G0, PWM8G1, PWM9G0, PWM9G1, QSPI1,
-              QSPI2, RGMII1, RGMII2, RGMII3, RGMII4, RMII1, RMII2, RMII3,
-              RMII4, RXD1, RXD2, RXD3, RXD4, SALT1, SALT10G0, SALT10G1,
-              SALT11G0, SALT11G1, SALT12G0, SALT12G1, SALT13G0, SALT13G1,
-              SALT14G0, SALT14G1, SALT15G0, SALT15G1, SALT16G0, SALT16G1,
-              SALT2, SALT3, SALT4, SALT5, SALT6, SALT7, SALT8, SALT9G0,
-              SALT9G1, SD1, SD2, SD3, SD3DAT4, SD3DAT5, SD3DAT6, SD3DAT7,
-              SGPM1, SGPS1, SIOONCTRL, SIOPBI, SIOPBO, SIOPWREQ, SIOPWRGD,
-              SIOS3, SIOS5, SIOSCI, SPI1, SPI1ABR, SPI1CS1, SPI1WP, SPI2,
-              SPI2CS1, SPI2CS2, TACH0, TACH1, TACH10, TACH11, TACH12, TACH13,
-              TACH14, TACH15, TACH2, TACH3, TACH4, TACH5, TACH6, TACH7, TACH8,
-              TACH9, THRU0, THRU1, THRU2, THRU3, TXD1, TXD2, TXD3, TXD4,
-              UART10, UART11, UART12G0, UART12G1, UART13G0, UART13G1, UART6,
-              UART7, UART8, UART9, VB, VGAHS, VGAVS, WDTRST1, WDTRST2, WDTRST3,
-              WDTRST4, ]
+              ADC2, ADC3, ADC4, ADC5, ADC6, ADC7, ADC8, ADC9, BMCINT, EMMCG1,
+              EMMCG4, EMMCG8, ESPI, ESPIALT, FSI1, FSI2, FWSPIABR, FWSPID,
+              FWQSPID, FWSPIWP, GPIT0, GPIT1, GPIT2, GPIT3, GPIT4, GPIT5,
+              GPIT6, GPIT7, GPIU0, GPIU1, GPIU2, GPIU3, GPIU4, GPIU5, GPIU6,
+              GPIU7, HVI3C3, HVI3C4, I2C1, I2C10, I2C11, I2C12, I2C13, I2C14,
+              I2C15, I2C16, I2C2, I2C3, I2C4, I2C5, I2C6, I2C7, I2C8, I2C9,
+              I3C3, I3C4, I3C5, I3C6, JTAGM, LHPD, LHSIRQ, LPC, LPCHC, LPCPD,
+              LPCPME, LPCSMI, LSIRQ, MACLINK1, MACLINK2, MACLINK3, MACLINK4,
+              MDIO1, MDIO2, MDIO3, MDIO4, NCTS1, NCTS2, NCTS3, NCTS4, NDCD1,
+              NDCD2, NDCD3, NDCD4, NDSR1, NDSR2, NDSR3, NDSR4, NDTR1, NDTR2,
+              NDTR3, NDTR4, NRI1, NRI2, NRI3, NRI4, NRTS1, NRTS2, NRTS3, NRTS4,
+              OSCCLK, PEWAKE, PWM0, PWM1, PWM10G0, PWM10G1, PWM11G0, PWM11G1,
+              PWM12G0, PWM12G1, PWM13G0, PWM13G1, PWM14G0, PWM14G1, PWM15G0,
+              PWM15G1, PWM2, PWM3, PWM4, PWM5, PWM6, PWM7, PWM8G0, PWM8G1,
+              PWM9G0, PWM9G1, QSPI1, QSPI2, RGMII1, RGMII2, RGMII3, RGMII4,
+              RMII1, RMII2, RMII3, RMII4, RXD1, RXD2, RXD3, RXD4, SALT1,
+              SALT10G0, SALT10G1, SALT11G0, SALT11G1, SALT12G0, SALT12G1,
+              SALT13G0, SALT13G1, SALT14G0, SALT14G1, SALT15G0, SALT15G1,
+              SALT16G0, SALT16G1, SALT2, SALT3, SALT4, SALT5, SALT6, SALT7,
+              SALT8, SALT9G0, SALT9G1, SD1, SD2, SD3, SGPM1, SGPS1, SIOONCTRL,
+              SIOPBI, SIOPBO, SIOPWREQ, SIOPWRGD, SIOS3, SIOS5, SIOSCI, SPI1,
+              SPI1ABR, SPI1CS1, SPI1WP, SPI2, SPI2CS1, SPI2CS2, TACH0, TACH1,
+              TACH10, TACH11, TACH12, TACH13, TACH14, TACH15, TACH2, TACH3,
+              TACH4, TACH5, TACH6, TACH7, TACH8, TACH9, THRU0, THRU1, THRU2,
+              THRU3, TXD1, TXD2, TXD3, TXD4, UART10, UART11, UART12G0,
+              UART12G1, UART13G0, UART13G1, UART6, UART7, UART8, UART9, VB,
+              VGAHS, VGAVS, WDTRST1, WDTRST2, WDTRST3, WDTRST4, ]
 
 required:
   - compatible
index a78150c47aa2a67b8759c7a7c450e9edd20a375b..f3241696819722e3ea0d2b68799f589803f66d4b 100644 (file)
@@ -30,8 +30,8 @@ if:
 properties:
   compatible:
     enum:
-      - const: regulator-fixed
-      - const: regulator-fixed-clock
+      - regulator-fixed
+      - regulator-fixed-clock
 
   regulator-name: true
 
index b261a3015f84f3c7982397123d908beabe45e9cd..04819ad379c2d1d7bd1d91445c575455f8bb4b84 100644 (file)
@@ -24,15 +24,17 @@ description: |
 
 properties:
   compatible:
-    items:
-      - enum:
-          - sifive,rocket0
-          - sifive,e5
-          - sifive,e51
-          - sifive,u54-mc
-          - sifive,u54
-          - sifive,u5
-      - const: riscv
+    oneOf:
+      - items:
+          - enum:
+              - sifive,rocket0
+              - sifive,e5
+              - sifive,e51
+              - sifive,u54-mc
+              - sifive,u54
+              - sifive,u5
+          - const: riscv
+      - const: riscv    # Simulator only
     description:
       Identifies that the hart uses the RISC-V instruction set
       and identifies the type of the hart.
@@ -66,12 +68,8 @@ properties:
       insensitive, letters in the riscv,isa string must be all
       lowercase to simplify parsing.
 
-  timebase-frequency:
-    type: integer
-    minimum: 1
-    description:
-      Specifies the clock frequency of the system timer in Hz.
-      This value is common to all harts on a single system image.
+  # RISC-V requires 'timebase-frequency' in /cpus, so disallow it here
+  timebase-frequency: false
 
   interrupt-controller:
     type: object
@@ -93,7 +91,6 @@ properties:
 
 required:
   - riscv,isa
-  - timebase-frequency
   - interrupt-controller
 
 examples:
index dd63151dc8b6d3e1644783fd8edc83aa86ab1b27..b143d9a21b2de13e528659fc97577335abf799e3 100644 (file)
@@ -26,6 +26,8 @@ Required properties:
     - "renesas,hscif-r8a77470" for R8A77470 (RZ/G1C) HSCIF compatible UART.
     - "renesas,scif-r8a774a1" for R8A774A1 (RZ/G2M) SCIF compatible UART.
     - "renesas,hscif-r8a774a1" for R8A774A1 (RZ/G2M) HSCIF compatible UART.
+    - "renesas,scif-r8a774b1" for R8A774B1 (RZ/G2N) SCIF compatible UART.
+    - "renesas,hscif-r8a774b1" for R8A774B1 (RZ/G2N) HSCIF compatible UART.
     - "renesas,scif-r8a774c0" for R8A774C0 (RZ/G2E) SCIF compatible UART.
     - "renesas,hscif-r8a774c0" for R8A774C0 (RZ/G2E) HSCIF compatible UART.
     - "renesas,scif-r8a7778" for R8A7778 (R-Car M1) SCIF compatible UART.
index b9f04e617eb770d7ceb4ecdba62f3d8ca984a4d4..6ffb09be7a76a2703ed0cc81bf8ca96adbb94f25 100644 (file)
@@ -85,8 +85,8 @@ A child node must exist to represent the core DWC2 IP block. The name of
 the node is not important. The content of the node is defined in dwc2.txt.
 
 PHY documentation is provided in the following places:
-- Documentation/devicetree/bindings/phy/meson-g12a-usb2-phy.txt
-- Documentation/devicetree/bindings/phy/meson-g12a-usb3-pcie-phy.txt
+- Documentation/devicetree/bindings/phy/amlogic,meson-g12a-usb2-phy.yaml
+- Documentation/devicetree/bindings/phy/amlogic,meson-g12a-usb3-pcie-phy.yaml
 
 Example device nodes:
        usb: usb@ffe09000 {
index 059f6ef1ad4a5a587055615355cbae5c3140f26c..1ca64c85191aade4f1f753a300ea75060dcb3ade 100644 (file)
@@ -63,7 +63,11 @@ properties:
     description:
       Set this flag to force EHCI reset after resume.
 
-  phys: true
+  phys:
+    description: PHY specifier for the USB PHY
+
+  phy-names:
+    const: usb
 
 required:
   - compatible
@@ -89,6 +93,7 @@ examples:
         interrupts = <39>;
         clocks = <&ahb_gates 1>;
         phys = <&usbphy 1>;
+        phy-names = "usb";
     };
 
 ...
index da5a14becbe56931ef18aa2d58654f9e65f56ea4..bcffec1f1341e502c634bdbde4ebc007c6575f99 100644 (file)
@@ -67,7 +67,11 @@ properties:
     description:
       Overrides the detected port count
 
-  phys: true
+  phys:
+    description: PHY specifier for the USB PHY
+
+  phy-names:
+    const: usb
 
 required:
   - compatible
@@ -84,6 +88,7 @@ examples:
           interrupts = <64>;
           clocks = <&usb_clk 6>, <&ahb_gates 2>;
           phys = <&usbphy 1>;
+          phy-names = "usb";
       };
 
 ...
index f3e4acecabe83f4f8e438273795702439dcb216e..42d8814f903a9ca36ece3a0eec41bbdae9f555bd 100644 (file)
@@ -33,7 +33,7 @@ Required properties:
        "dma_ck": dma_bus clock for data transfer by DMA,
        "xhci_ck": controller clock
 
- - phys : see usb-hcd.txt in the current directory
+ - phys : see usb-hcd.yaml in the current directory
 
 Optional properties:
  - wakeup-source : enable USB remote wakeup;
@@ -53,7 +53,7 @@ Optional properties:
        See: Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
  - imod-interval-ns: default interrupt moderation interval is 5000ns
 
-additionally the properties from usb-hcd.txt (in the current directory) are
+additionally the properties from usb-hcd.yaml (in the current directory) are
 supported.
 
 Example:
index b9af7f5ee91d6f7dbb73f050c4ef3af947e29d90..e0ae6096f7ac8f1fd3b607c831a4e4a43dc44bf4 100644 (file)
@@ -17,7 +17,7 @@ Required properties:
  - clock-names : must contain "sys_ck" for clock of controller,
        the following clocks are optional:
        "ref_ck", "mcu_ck" and "dma_ck";
- - phys : see usb-hcd.txt in the current directory
+ - phys : see usb-hcd.yaml in the current directory
  - dr_mode : should be one of "host", "peripheral" or "otg",
        refer to usb/generic.txt
 
@@ -60,7 +60,7 @@ Optional properties:
  - mediatek,u3p-dis-msk : mask to disable u3ports, bit0 for u3port0,
        bit1 for u3port1, ... etc;
 
-additionally the properties from usb-hcd.txt (in the current directory) are
+additionally the properties from usb-hcd.yaml (in the current directory) are
 supported.
 
 Sub-nodes:
index 9c8c56d3a79284c36a6fdb42aff78cd3366e90a7..7263b7f2b510db09c890187b222c5f254e480121 100644 (file)
@@ -18,8 +18,13 @@ properties:
     description:
       List of all the USB PHYs on this HCD
 
+  phy-names:
+    description:
+      Name specifier for the USB PHY
+
 examples:
   - |
     usb {
         phys = <&usb2_phy1>, <&usb3_phy1>;
+        phy-names = "usb";
     };
index cc2e6f7d602e1f8376233b894538827b4de54632..d1702eb2c8bd83d3f5c1c6cfaca950608ca5c12b 100644 (file)
@@ -6,7 +6,7 @@ Required properties:
 - reg : Should contain 1 register ranges(address and length)
 - interrupts : UHCI controller interrupt
 
-additionally the properties from usb-hcd.txt (in the current directory) are
+additionally the properties from usb-hcd.yaml (in the current directory) are
 supported.
 
 Example:
index 97400e8f86057709946f9b715250e8d529ffd77d..b49b819571f9c5bfe3d699c6486b6474edaef2b6 100644 (file)
@@ -41,9 +41,9 @@ Optional properties:
   - usb3-lpm-capable: determines if platform is USB3 LPM capable
   - quirk-broken-port-ped: set if the controller has broken port disable mechanism
   - imod-interval-ns: default interrupt moderation interval is 5000ns
-  - phys : see usb-hcd.txt in the current directory
+  - phys : see usb-hcd.yaml in the current directory
 
-additionally the properties from usb-hcd.txt (in the current directory) are
+additionally the properties from usb-hcd.yaml (in the current directory) are
 supported.
 
 
index 8147c3f218bfb11167ca160ebc69c3d33b5b07f4..230ad59b462b659cba1bbe580cdb8588570a64f4 100644 (file)
@@ -7,6 +7,7 @@ Linux Hardware Monitoring
 
    hwmon-kernel-api
    pmbus-core
+   inspur-ipsps1
    submitting-patches
    sysfs-interface
    userspace-tools
index 2b871ae3448f289d433cb932d3be641e3748f608..292c0c26bdd1936fce44d8bbc2368ab0eceb098b 100644 (file)
@@ -1,5 +1,5 @@
 Kernel driver inspur-ipsps1
-=======================
+===========================
 
 Supported chips:
 
index 12a86ba17de9defff812c3ef248dc02b6421130b..4451d59b942591ed82cee27f5666d5f8aecba169 100644 (file)
@@ -21,10 +21,17 @@ Supported chips:
 
 * AMD Family 14h processors: "Brazos" (C/E/G/Z-Series)
 
-* AMD Family 15h processors: "Bulldozer" (FX-Series), "Trinity", "Kaveri", "Carrizo"
+* AMD Family 15h processors: "Bulldozer" (FX-Series), "Trinity", "Kaveri",
+  "Carrizo", "Stoney Ridge", "Bristol Ridge"
 
 * AMD Family 16h processors: "Kabini", "Mullins"
 
+* AMD Family 17h processors: "Zen", "Zen 2"
+
+* AMD Family 18h processors: "Hygon Dhyana"
+
+* AMD Family 19h processors: "Zen 3"
+
   Prefix: 'k10temp'
 
   Addresses scanned: PCI space
@@ -110,3 +117,12 @@ The maximum value for Tctl is available in the file temp1_max.
 If the BIOS has enabled hardware temperature control, the threshold at
 which the processor will throttle itself to avoid damage is available in
 temp1_crit and temp1_crit_hyst.
+
+On some AMD CPUs, there is a difference between the die temperature (Tdie) and
+the reported temperature (Tctl). Tdie is the real measured temperature, and
+Tctl is used for fan control. While Tctl is always available as temp1_input,
+the driver exports Tdie temperature as temp2_input for those CPUs which support
+it.
+
+Models from the 17h family report a relative temperature; the driver aims to
+compensate and report the real temperature.
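
For reference, both values are exposed through the standard hwmon sysfs
interface in millidegrees Celsius. A minimal userspace sketch (the hwmon0
index is hypothetical and system dependent):

    #include <stdio.h>

    /* Sketch: temp1_input is Tctl; temp2_input is Tdie on CPUs that
     * support it. Both are reported in millidegrees Celsius. */
    int main(void)
    {
            long tctl = 0, tdie = 0;
            FILE *f = fopen("/sys/class/hwmon/hwmon0/temp1_input", "r");

            if (f) { (void)fscanf(f, "%ld", &tctl); fclose(f); }
            f = fopen("/sys/class/hwmon/hwmon0/temp2_input", "r");
            if (f) { (void)fscanf(f, "%ld", &tdie); fclose(f); }
            printf("Tctl: %.1f C, Tdie: %.1f C\n", tctl / 1000.0, tdie / 1000.0);
            return 0;
    }
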
index 6ba9d5365ff3dd6c80085da47ff3ccd9237dd40e..b89c88168d6a3110be823cbff8dfbdf8d3d56b22 100644 (file)
@@ -954,11 +954,6 @@ When kbuild executes, the following steps are followed (roughly):
 
        From commandline LDFLAGS_MODULE shall be used (see kbuild.txt).
 
-    KBUILD_ARFLAGS   Options for $(AR) when creating archives
-
-       $(KBUILD_ARFLAGS) set by the top level Makefile to "D" (deterministic
-       mode) if this option is supported by $(AR).
-
     KBUILD_LDS
 
        The linker script with full path. Assigned by the top-level Makefile.
index d2ae799237fd8ebf8b5510e73fcf922feaf341f1..774a998dcf370e8395d988aca75de224562ae16b 100644 (file)
@@ -498,10 +498,11 @@ build.
        will be written containing all exported symbols that were not
        defined in the kernel.
 
---- 6.3 Symbols From Another External Module
+6.3 Symbols From Another External Module
+----------------------------------------
 
        Sometimes, an external module uses exported symbols from
-       another external module. kbuild needs to have full knowledge of
+       another external module. Kbuild needs to have full knowledge of
        all symbols to avoid spitting out warnings about undefined
        symbols. Three solutions exist for this situation.
 
@@ -521,7 +522,7 @@ build.
                The top-level kbuild file would then look like::
 
                        #./Kbuild (or ./Makefile):
-                               obj-y := foo/ bar/
+                               obj-m := foo/ bar/
 
                And executing::
 
diff --git a/Documentation/kbuild/namespaces.rst b/Documentation/kbuild/namespaces.rst
deleted file mode 100644 (file)
index 982ed7b..0000000
+++ /dev/null
@@ -1,154 +0,0 @@
-=================
-Symbol Namespaces
-=================
-
-The following document describes how to use Symbol Namespaces to structure the
-export surface of in-kernel symbols exported through the family of
-EXPORT_SYMBOL() macros.
-
-.. Table of Contents
-
-       === 1 Introduction
-       === 2 How to define Symbol Namespaces
-          --- 2.1 Using the EXPORT_SYMBOL macros
-          --- 2.2 Using the DEFAULT_SYMBOL_NAMESPACE define
-       === 3 How to use Symbols exported in Namespaces
-       === 4 Loading Modules that use namespaced Symbols
-       === 5 Automatically creating MODULE_IMPORT_NS statements
-
-1. Introduction
-===============
-
-Symbol Namespaces have been introduced as a means to structure the export
-surface of the in-kernel API. It allows subsystem maintainers to partition
-their exported symbols into separate namespaces. That is useful for
-documentation purposes (think of the SUBSYSTEM_DEBUG namespace) as well as for
-limiting the availability of a set of symbols for use in other parts of the
-kernel. As of today, modules that make use of symbols exported into namespaces,
-are required to import the namespace. Otherwise the kernel will, depending on
-its configuration, reject loading the module or warn about a missing import.
-
-2. How to define Symbol Namespaces
-==================================
-
-Symbols can be exported into namespace using different methods. All of them are
-changing the way EXPORT_SYMBOL and friends are instrumented to create ksymtab
-entries.
-
-2.1 Using the EXPORT_SYMBOL macros
-==================================
-
-In addition to the macros EXPORT_SYMBOL() and EXPORT_SYMBOL_GPL(), that allow
-exporting of kernel symbols to the kernel symbol table, variants of these are
-available to export symbols into a certain namespace: EXPORT_SYMBOL_NS() and
-EXPORT_SYMBOL_NS_GPL(). They take one additional argument: the namespace.
-Please note that due to macro expansion that argument needs to be a
-preprocessor symbol. E.g. to export the symbol `usb_stor_suspend` into the
-namespace `USB_STORAGE`, use::
-
-       EXPORT_SYMBOL_NS(usb_stor_suspend, USB_STORAGE);
-
-The corresponding ksymtab entry struct `kernel_symbol` will have the member
-`namespace` set accordingly. A symbol that is exported without a namespace will
-refer to `NULL`. There is no default namespace if none is defined. `modpost`
-and kernel/module.c make use the namespace at build time or module load time,
-respectively.
-
-2.2 Using the DEFAULT_SYMBOL_NAMESPACE define
-=============================================
-
-Defining namespaces for all symbols of a subsystem can be very verbose and may
-become hard to maintain. Therefore a default define (DEFAULT_SYMBOL_NAMESPACE)
-is been provided, that, if set, will become the default for all EXPORT_SYMBOL()
-and EXPORT_SYMBOL_GPL() macro expansions that do not specify a namespace.
-
-There are multiple ways of specifying this define and it depends on the
-subsystem and the maintainer's preference, which one to use. The first option
-is to define the default namespace in the `Makefile` of the subsystem. E.g. to
-export all symbols defined in usb-common into the namespace USB_COMMON, add a
-line like this to drivers/usb/common/Makefile::
-
-       ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE=USB_COMMON
-
-That will affect all EXPORT_SYMBOL() and EXPORT_SYMBOL_GPL() statements. A
-symbol exported with EXPORT_SYMBOL_NS() while this definition is present, will
-still be exported into the namespace that is passed as the namespace argument
-as this argument has preference over a default symbol namespace.
-
-A second option to define the default namespace is directly in the compilation
-unit as preprocessor statement. The above example would then read::
-
-       #undef  DEFAULT_SYMBOL_NAMESPACE
-       #define DEFAULT_SYMBOL_NAMESPACE USB_COMMON
-
-within the corresponding compilation unit before any EXPORT_SYMBOL macro is
-used.
-
-3. How to use Symbols exported in Namespaces
-============================================
-
-In order to use symbols that are exported into namespaces, kernel modules need
-to explicitly import these namespaces. Otherwise the kernel might reject to
-load the module. The module code is required to use the macro MODULE_IMPORT_NS
-for the namespaces it uses symbols from. E.g. a module using the
-usb_stor_suspend symbol from above, needs to import the namespace USB_STORAGE
-using a statement like::
-
-       MODULE_IMPORT_NS(USB_STORAGE);
-
-This will create a `modinfo` tag in the module for each imported namespace.
-This has the side effect, that the imported namespaces of a module can be
-inspected with modinfo::
-
-       $ modinfo drivers/usb/storage/ums-karma.ko
-       [...]
-       import_ns:      USB_STORAGE
-       [...]
-
-
-It is advisable to add the MODULE_IMPORT_NS() statement close to other module
-metadata definitions like MODULE_AUTHOR() or MODULE_LICENSE(). Refer to section
-5. for a way to create missing import statements automatically.
-
-4. Loading Modules that use namespaced Symbols
-==============================================
-
-At module loading time (e.g. `insmod`), the kernel will check each symbol
-referenced from the module for its availability and whether the namespace it
-might be exported to has been imported by the module. The default behaviour of
-the kernel is to reject loading modules that don't specify sufficient imports.
-An error will be logged and loading will be failed with EINVAL. In order to
-allow loading of modules that don't satisfy this precondition, a configuration
-option is available: Setting MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS=y will
-enable loading regardless, but will emit a warning.
-
-5. Automatically creating MODULE_IMPORT_NS statements
-=====================================================
-
-Missing namespaces imports can easily be detected at build time. In fact,
-modpost will emit a warning if a module uses a symbol from a namespace
-without importing it.
-MODULE_IMPORT_NS() statements will usually be added at a definite location
-(along with other module meta data). To make the life of module authors (and
-subsystem maintainers) easier, a script and make target is available to fixup
-missing imports. Fixing missing imports can be done with::
-
-       $ make nsdeps
-
-A typical scenario for module authors would be::
-
-       - write code that depends on a symbol from a not imported namespace
-       - `make`
-       - notice the warning of modpost telling about a missing import
-       - run `make nsdeps` to add the import to the correct code location
-
-For subsystem maintainers introducing a namespace, the steps are very similar.
-Again, `make nsdeps` will eventually add the missing namespace imports for
-in-tree modules::
-
-       - move or add symbols to a namespace (e.g. with EXPORT_SYMBOL_NS())
-       - `make` (preferably with an allmodconfig to cover all in-kernel
-         modules)
-       - notice the warning of modpost telling about a missing import
-       - run `make nsdeps` to add the import to the correct code location
-
index ab92e98c89c86cfc8bb5ef428167121b5f5290d6..503393854e2e2a7bbe12d51de0c848f293ea4fe4 100644 (file)
@@ -16,16 +16,21 @@ the kernel may be unreproducible, and how to avoid them.
 Timestamps
 ----------
 
-The kernel embeds a timestamp in two places:
+The kernel embeds timestamps in three places:
 
 * The version string exposed by ``uname()`` and included in
   ``/proc/version``
 
 * File timestamps in the embedded initramfs
 
-By default the timestamp is the current time.  This must be overridden
-using the `KBUILD_BUILD_TIMESTAMP`_ variable.  If you are building
-from a git commit, you could use its commit date.
+* If enabled via ``CONFIG_IKHEADERS``, file timestamps of kernel
+  headers embedded in the kernel or respective module,
+  exposed via ``/sys/kernel/kheaders.tar.xz``
+
+By default the timestamp is the current time and, in the case of
+``kheaders``, the various files' modification times. This must
+be overridden using the `KBUILD_BUILD_TIMESTAMP`_ variable.
+If you are building from a git commit, you could use its commit date.
 
 The kernel does *not* use the ``__DATE__`` and ``__TIME__`` macros,
 and enables warnings if they are used.  If you incorporate external
index f51f92571e395409e96cecd25ee4a3d6721a859c..c1f7f75e5fd9022a813520322ca8ac3626dd91c6 100644 (file)
@@ -23,6 +23,7 @@ Contents:
    intel/ice
    google/gve
    mellanox/mlx5
+   netronome/nfp
    pensando/ionic
 
 .. only::  subproject and html
index 2b9f4887beda8970e18d7f93feef61504307668a..caf023cc88dead5ccf6a4f9bb4d95a4e6eb5296f 100644 (file)
@@ -1,8 +1,8 @@
 .. SPDX-License-Identifier: GPL-2.0+
 
-==============================================================
-Linux* Base Driver for the Intel(R) PRO/100 Family of Adapters
-==============================================================
+=============================================================
+Linux Base Driver for the Intel(R) PRO/100 Family of Adapters
+=============================================================
 
 June 1, 2018
 
@@ -21,7 +21,7 @@ Contents
 In This Release
 ===============
 
-This file describes the Linux* Base Driver for the Intel(R) PRO/100 Family of
+This file describes the Linux Base Driver for the Intel(R) PRO/100 Family of
 Adapters. This driver includes support for Itanium(R)2-based systems.
 
 For questions related to hardware requirements, refer to the documentation
@@ -138,9 +138,9 @@ version 1.6 or later is required for this functionality.
 The latest release of ethtool can be found from
 https://www.kernel.org/pub/software/network/ethtool/
 
-Enabling Wake on LAN* (WoL)
----------------------------
-WoL is provided through the ethtool* utility.  For instructions on
+Enabling Wake on LAN (WoL)
+--------------------------
+WoL is provided through the ethtool utility.  For instructions on
 enabling WoL with ethtool, refer to the ethtool man page.  WoL will be
 enabled on the system during the next shut down or reboot.  For this
 driver version, in order to enable WoL, the e100 driver must be loaded
index 956560b6e745ce6f45130fd24cbd0593f946c327..4aaae0f7d6ba7fb3b5290a05599605bff586193a 100644 (file)
@@ -1,8 +1,8 @@
 .. SPDX-License-Identifier: GPL-2.0+
 
-===========================================================
-Linux* Base Driver for Intel(R) Ethernet Network Connection
-===========================================================
+==========================================================
+Linux Base Driver for Intel(R) Ethernet Network Connection
+==========================================================
 
 Intel Gigabit Linux driver.
 Copyright(c) 1999 - 2013 Intel Corporation.
@@ -438,10 +438,10 @@ ethtool
   The latest release of ethtool can be found from
   https://www.kernel.org/pub/software/network/ethtool/
 
-Enabling Wake on LAN* (WoL)
----------------------------
+Enabling Wake on LAN (WoL)
+--------------------------
 
-  WoL is configured through the ethtool* utility.
+  WoL is configured through the ethtool utility.
 
   WoL will be enabled on the system during the next shut down or reboot.
   For this driver version, in order to enable WoL, the e1000 driver must be
index 01999f05509c425010b5c944d4ac2932da00ad5a..f49cd370e7bf622d3c8bae7a56a86f4360499400 100644 (file)
@@ -1,8 +1,8 @@
 .. SPDX-License-Identifier: GPL-2.0+
 
-======================================================
-Linux* Driver for Intel(R) Ethernet Network Connection
-======================================================
+=====================================================
+Linux Driver for Intel(R) Ethernet Network Connection
+=====================================================
 
 Intel Gigabit Linux driver.
 Copyright(c) 2008-2018 Intel Corporation.
@@ -338,7 +338,7 @@ and higher cannot be forced. Use the autonegotiation advertising setting to
 manually set devices for 1 Gbps and higher.
 
 Speed, duplex, and autonegotiation advertising are configured through the
-ethtool* utility.
+ethtool utility.
 
 Caution: Only experienced network administrators should force speed and duplex
 or change autonegotiation advertising manually. The settings at the switch must
@@ -351,9 +351,9 @@ will not attempt to auto-negotiate with its link partner since those adapters
 operate only in full duplex and only at their native speed.
 
 
-Enabling Wake on LAN* (WoL)
----------------------------
-WoL is configured through the ethtool* utility.
+Enabling Wake on LAN (WoL)
+--------------------------
+WoL is configured through the ethtool utility.
 
 WoL will be enabled on the system during the next shut down or reboot. For
 this driver version, in order to enable WoL, the e1000e driver must be loaded
index ac3269e34f552206f206a31231e8338f5426f60d..4d279e64e221e6475b291e98c7fde7a3a882caaf 100644 (file)
@@ -1,8 +1,8 @@
 .. SPDX-License-Identifier: GPL-2.0+
 
-==============================================================
-Linux* Base Driver for Intel(R) Ethernet Multi-host Controller
-==============================================================
+=============================================================
+Linux Base Driver for Intel(R) Ethernet Multi-host Controller
+=============================================================
 
 August 20, 2018
 Copyright(c) 2015-2018 Intel Corporation.
@@ -120,8 +120,8 @@ rx-flow-hash tcp4|udp4|ah4|esp4|sctp4|tcp6|udp6|ah6|esp6|sctp6 m|v|t|s|d|f|n|r
 Known Issues/Troubleshooting
 ============================
 
-Enabling SR-IOV in a 64-bit Microsoft* Windows Server* 2012/R2 guest OS under Linux KVM
----------------------------------------------------------------------------------------
+Enabling SR-IOV in a 64-bit Microsoft Windows Server 2012/R2 guest OS under Linux KVM
+-------------------------------------------------------------------------------------
 KVM Hypervisor/VMM supports direct assignment of a PCIe device to a VM. This
 includes traditional PCIe devices, as well as SR-IOV-capable devices based on
 the Intel Ethernet Controller XL710.
index 848fd388fa6e01403f7d1f67d2256463f4ebe228..8a9b18573688d87c9fc521d00b129bd7ae7c61b7 100644 (file)
@@ -1,8 +1,8 @@
 .. SPDX-License-Identifier: GPL-2.0+
 
-==================================================================
-Linux* Base Driver for the Intel(R) Ethernet Controller 700 Series
-==================================================================
+=================================================================
+Linux Base Driver for the Intel(R) Ethernet Controller 700 Series
+=================================================================
 
 Intel 40 Gigabit Linux driver.
 Copyright(c) 1999-2018 Intel Corporation.
@@ -384,7 +384,7 @@ NOTE: You cannot set the speed for devices based on the Intel(R) Ethernet
 Network Adapter XXV710 based devices.
 
 Speed, duplex, and autonegotiation advertising are configured through the
-ethtool* utility.
+ethtool utility.
 
 Caution: Only experienced network administrators should force speed and duplex
 or change autonegotiation advertising manually. The settings at the switch must
index cfc08842e32c6e4c00839c13a2861c5e32bb890d..84ac7e75f36313a5343709f65b1bc9bcef8fdc19 100644 (file)
@@ -1,8 +1,8 @@
 .. SPDX-License-Identifier: GPL-2.0+
 
-==================================================================
-Linux* Base Driver for Intel(R) Ethernet Adaptive Virtual Function
-==================================================================
+=================================================================
+Linux Base Driver for Intel(R) Ethernet Adaptive Virtual Function
+=================================================================
 
 Intel Ethernet Adaptive Virtual Function Linux driver.
 Copyright(c) 2013-2018 Intel Corporation.
@@ -19,7 +19,7 @@ Contents
 Overview
 ========
 
-This file describes the iavf Linux* Base Driver. This driver was formerly
+This file describes the iavf Linux Base Driver. This driver was formerly
 called i40evf.
 
 The iavf driver supports the below mentioned virtual function devices and
index c220aa2711c67c3983ae1090a54973868837b95a..ee43ea57d4430ca78b98efa2e7807a3ec4dbaa42 100644 (file)
@@ -1,8 +1,8 @@
 .. SPDX-License-Identifier: GPL-2.0+
 
-===================================================================
-Linux* Base Driver for the Intel(R) Ethernet Connection E800 Series
-===================================================================
+==================================================================
+Linux Base Driver for the Intel(R) Ethernet Connection E800 Series
+==================================================================
 
 Intel ice Linux driver.
 Copyright(c) 2018 Intel Corporation.
index fc8cfaa5dcfaca38d70b31f2a0e1df8134d380a7..87e560fe5eaaba65bfe1840504129afb533671ae 100644 (file)
@@ -1,8 +1,8 @@
 .. SPDX-License-Identifier: GPL-2.0+
 
-===========================================================
-Linux* Base Driver for Intel(R) Ethernet Network Connection
-===========================================================
+==========================================================
+Linux Base Driver for Intel(R) Ethernet Network Connection
+==========================================================
 
 Intel Gigabit Linux driver.
 Copyright(c) 1999-2018 Intel Corporation.
@@ -129,9 +129,9 @@ version is required for this functionality. Download it at:
 https://www.kernel.org/pub/software/network/ethtool/
 
 
-Enabling Wake on LAN* (WoL)
----------------------------
-WoL is configured through the ethtool* utility.
+Enabling Wake on LAN (WoL)
+--------------------------
+WoL is configured through the ethtool utility.
 
 WoL will be enabled on the system during the next shut down or reboot. For
 this driver version, in order to enable WoL, the igb driver must be loaded
index 9cddabe8108ead0e2bcd93d71626ba8214244f8a..557fc020ef31e0f3c403bd38dedb75c0582d2fe5 100644 (file)
@@ -1,8 +1,8 @@
 .. SPDX-License-Identifier: GPL-2.0+
 
-============================================================
-Linux* Base Virtual Function Driver for Intel(R) 1G Ethernet
-============================================================
+===========================================================
+Linux Base Virtual Function Driver for Intel(R) 1G Ethernet
+===========================================================
 
 Intel Gigabit Virtual Function Linux driver.
 Copyright(c) 1999-2018 Intel Corporation.
index c7d25483fedb20c049c645835ec13ad7600acfce..f1d5233e5e510929ba6d3f4f8cd049dc8767677c 100644 (file)
@@ -1,8 +1,8 @@
 .. SPDX-License-Identifier: GPL-2.0+
 
-=============================================================================
-Linux* Base Driver for the Intel(R) Ethernet 10 Gigabit PCI Express Adapters
-=============================================================================
+===========================================================================
+Linux Base Driver for the Intel(R) Ethernet 10 Gigabit PCI Express Adapters
+===========================================================================
 
 Intel 10 Gigabit Linux driver.
 Copyright(c) 1999-2018 Intel Corporation.
@@ -519,8 +519,8 @@ The offload is also supported for ixgbe's VFs, but the VF must be set as
 Known Issues/Troubleshooting
 ============================
 
-Enabling SR-IOV in a 64-bit Microsoft* Windows Server* 2012/R2 guest OS
------------------------------------------------------------------------
+Enabling SR-IOV in a 64-bit Microsoft Windows Server 2012/R2 guest OS
+---------------------------------------------------------------------
 Linux KVM Hypervisor/VMM supports direct assignment of a PCIe device to a VM.
 This includes traditional PCIe devices, as well as SR-IOV-capable devices based
 on the Intel Ethernet Controller XL710.
index 5d4977360157ed87b87b001c74bbe314c9fbe0d5..76bbde736f21546071167609294a83a91a2307fd 100644 (file)
@@ -1,8 +1,8 @@
 .. SPDX-License-Identifier: GPL-2.0+
 
-=============================================================
-Linux* Base Virtual Function Driver for Intel(R) 10G Ethernet
-=============================================================
+============================================================
+Linux Base Virtual Function Driver for Intel(R) 10G Ethernet
+============================================================
 
 Intel 10 Gigabit Virtual Function Linux driver.
 Copyright(c) 1999-2018 Intel Corporation.
index 67b6839d516b976d5c850d51d78dc6c5fcb64e1a..c17d680cf334aeebcb7aa905b01151b99e2a68f9 100644 (file)
@@ -1,8 +1,8 @@
 .. SPDX-License-Identifier: GPL-2.0+
 
-==========================================================
-Linux* Driver for the Pensando(R) Ethernet adapter family
-==========================================================
+========================================================
+Linux Driver for the Pensando(R) Ethernet adapter family
+========================================================
 
 Pensando Linux Ethernet driver.
 Copyright(c) 2019 Pensando Systems, Inc
@@ -36,8 +36,10 @@ Support
 =======
 For general Linux networking support, please use the netdev mailing
 list, which is monitored by Pensando personnel::
+
   netdev@vger.kernel.org
 
 For more specific support needs, please use the Pensando driver support
 email::
-       drivers@pensando.io
+
+  drivers@pensando.io
index 49e95f438ed7571a93bceffdc17846c35dd64fca..8d4ad1d1ae26f11ffe0ff1db61c61f20b573b8b8 100644 (file)
@@ -207,8 +207,8 @@ TCP variables:
 
 somaxconn - INTEGER
        Limit of socket listen() backlog, known in userspace as SOMAXCONN.
-       Defaults to 128.  See also tcp_max_syn_backlog for additional tuning
-       for TCP sockets.
+       Defaults to 4096 (was 128 before Linux 5.4).
+       See also tcp_max_syn_backlog for additional tuning for TCP sockets.
 
 tcp_abort_on_overflow - BOOLEAN
        If listening service is too slow to accept new connections,
@@ -408,11 +408,14 @@ tcp_max_orphans - INTEGER
        up to ~64K of unswappable memory.
 
 tcp_max_syn_backlog - INTEGER
-       Maximal number of remembered connection requests, which have not
-       received an acknowledgment from connecting client.
+       Maximal number of remembered connection requests (SYN_RECV),
+       which have not received an acknowledgment from the connecting client.
+       This is a per-listener limit.
        The minimal value is 128 for low memory machines, and it will
        increase in proportion to the memory of machine.
        If server suffers from overload, try increasing this number.
+       Remember to also check /proc/sys/net/core/somaxconn.
+       A SYN_RECV request socket consumes about 304 bytes of memory.
 
 tcp_max_tw_buckets - INTEGER
        Maximal number of timewait sockets held by system simultaneously.
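As a quick illustration of how these two knobs interact (a minimal userspace sketch, not part of the patch above; the numbers mirror the defaults quoted in the text), the backlog passed to listen() is silently capped by net.core.somaxconn, so raising the application's value alone stops helping once it exceeds the sysctl::

  #include <stdio.h>
  #include <sys/socket.h>

  int main(void)
  {
          int fd = socket(AF_INET, SOCK_STREAM, 0);

          if (fd < 0) {
                  perror("socket");
                  return 1;
          }

          /*
           * Request a large accept queue; the kernel clamps this to
           * net.core.somaxconn (4096 by default since Linux 5.4, 128 before).
           * SYN_RECV entries are separately bounded by tcp_max_syn_backlog.
           */
          if (listen(fd, 8192) < 0)
                  perror("listen");

          return 0;
  }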
index ce7e7a044e080e9209e13b9b4915b209e56b4574..dc60b13fcd09653a95eda517e9ccdc38e50c26cf 100644 (file)
@@ -272,7 +272,7 @@ supported flags are:
 * MSG_DONTWAIT, i.e. non-blocking operation.
 
 recvmsg(2)
-^^^^^^^^^
+^^^^^^^^^^
 
 In most cases recvmsg(2) is needed if you want to extract more information than
 recvfrom(2) can provide, for example packet priority and timestamp. The
index 9cb31c5e2dcdf465e4efda3f8543c2857a7c3ec6..9bdb7d5a3ba3ae4974a4a5875313707d23476abf 100644 (file)
@@ -92,16 +92,16 @@ under some conditions.
 Part III: Registering a Network Device to DIM
 ==============================================
 
-Net DIM API exposes the main function net_dim(struct net_dim *dim,
-struct net_dim_sample end_sample). This function is the entry point to the Net
+Net DIM API exposes the main function net_dim(struct dim *dim,
+struct dim_sample end_sample). This function is the entry point to the Net
 DIM algorithm and has to be called every time the driver would like to check if
 it should change interrupt moderation parameters. The driver should provide two
-data structures: struct net_dim and struct net_dim_sample. Struct net_dim
+data structures: struct dim and struct dim_sample. Struct dim
 describes the state of DIM for a specific object (RX queue, TX queue,
 other queues, etc.). This includes the current selected profile, previous data
 samples, the callback function provided by the driver and more.
-Struct net_dim_sample describes a data sample, which will be compared to the
-data sample stored in struct net_dim in order to decide on the algorithm's next
+Struct dim_sample describes a data sample, which will be compared to the
+data sample stored in struct dim in order to decide on the algorithm's next
 step. The sample should include bytes, packets and interrupts, measured by
 the driver.
 
@@ -110,9 +110,9 @@ main net_dim() function. The recommended method is to call net_dim() on each
 interrupt. Since Net DIM has a built-in moderation and it might decide to skip
 iterations under certain conditions, there is no need to moderate the net_dim()
 calls as well. As mentioned above, the driver needs to provide an object of type
-struct net_dim to the net_dim() function call. It is advised for each entity
-using Net DIM to hold a struct net_dim as part of its data structure and use it
-as the main Net DIM API object. The struct net_dim_sample should hold the latest
+struct dim to the net_dim() function call. It is advised for each entity
+using Net DIM to hold a struct dim as part of its data structure and use it
+as the main Net DIM API object. The struct dim_sample should hold the latest
 bytes, packets and interrupts count. No need to perform any calculations, just
 include the raw data.
 
@@ -132,19 +132,19 @@ usage is not complete but it should make the outline of the usage clear.
 
 my_driver.c:
 
-#include <linux/net_dim.h>
+#include <linux/dim.h>
 
 /* Callback for net DIM to schedule on a decision to change moderation */
 void my_driver_do_dim_work(struct work_struct *work)
 {
-       /* Get struct net_dim from struct work_struct */
-       struct net_dim *dim = container_of(work, struct net_dim,
-                                          work);
+       /* Get struct dim from struct work_struct */
+       struct dim *dim = container_of(work, struct dim,
+                                      work);
        /* Do interrupt moderation related stuff */
        ...
 
        /* Signal net DIM work is done and it should move to next iteration */
-       dim->state = NET_DIM_START_MEASURE;
+       dim->state = DIM_START_MEASURE;
 }
 
 /* My driver's interrupt handler */
@@ -152,13 +152,13 @@ int my_driver_handle_interrupt(struct my_driver_entity *my_entity, ...)
 {
        ...
        /* A struct to hold current measured data */
-       struct net_dim_sample dim_sample;
+       struct dim_sample dim_sample;
        ...
        /* Initiate data sample struct with current data */
-       net_dim_sample(my_entity->events,
-                      my_entity->packets,
-                      my_entity->bytes,
-                      &dim_sample);
+       dim_update_sample(my_entity->events,
+                         my_entity->packets,
+                         my_entity->bytes,
+                         &dim_sample);
        /* Call net DIM */
        net_dim(&my_entity->dim, dim_sample);
        ...
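The excerpt above elides the one-time setup. As a hedged sketch (my_driver_dim_setup is a made-up helper; the dim fields and DIM constants are from <linux/dim.h> as used in the example), the work item and initial state would typically be prepared once before the first interrupt::

  #include <linux/dim.h>
  #include <linux/workqueue.h>

  static void my_driver_dim_setup(struct my_driver_entity *my_entity)
  {
          /* Defer applying a new moderation profile to the callback above. */
          INIT_WORK(&my_entity->dim.work, my_driver_do_dim_work);

          /* Start from a fresh measurement window on the first interrupt. */
          my_entity->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
          my_entity->dim.state = DIM_START_MEASURE;
  }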
index f4a2198187f9b6b18ba16230f3947d2e50c9328a..ada573b7d703bca0b0ab725e2714f84f6dd2bd1f 100644 (file)
@@ -56,7 +56,7 @@ instead of ``double-indenting`` the ``case`` labels.  E.g.:
        case 'K':
        case 'k':
                mem <<= 10;
-               /* fall through */
+               fallthrough;
        default:
                break;
        }
index 053b24a6dd38cce19fc6d5ae1ed800dcb55cab00..179f2a5625a0e7db8f0a120cac1dab875f800bda 100644 (file)
@@ -122,14 +122,27 @@ memory adjacent to the stack (when built without `CONFIG_VMAP_STACK=y`)
 
 Implicit switch case fall-through
 ---------------------------------
-The C language allows switch cases to "fall through" when
-a "break" statement is missing at the end of a case. This,
-however, introduces ambiguity in the code, as it's not always
-clear if the missing break is intentional or a bug. As there
-have been a long list of flaws `due to missing "break" statements
+The C language allows switch cases to "fall-through" when a "break" statement
+is missing at the end of a case. This, however, introduces ambiguity in the
+code, as it's not always clear if the missing break is intentional or a bug.
+
+As there has been a long list of flaws `due to missing "break" statements
 <https://cwe.mitre.org/data/definitions/484.html>`_, we no longer allow
-"implicit fall-through". In order to identify an intentional fall-through
-case, we have adopted the marking used by static analyzers: a comment
-saying `/* Fall through */`. Once the C++17 `__attribute__((fallthrough))`
-is more widely handled by C compilers, static analyzers, and IDEs, we can
-switch to using that instead.
+"implicit fall-through".
+
+In order to identify intentional fall-through cases, we have adopted a
+pseudo-keyword macro 'fallthrough' which expands to gcc's extension
+__attribute__((__fallthrough__)); see `Statement Attributes
+<https://gcc.gnu.org/onlinedocs/gcc/Statement-Attributes.html>`_.
+
+When the C17/C18 [[fallthrough]] syntax is more commonly supported by
+C compilers, static analyzers, and IDEs, we can switch to using that syntax
+for the macro pseudo-keyword.
+
+All switch/case blocks must end in one of:
+
+       break;
+       fallthrough;
+       continue;
+       goto <label>;
+       return [expression];
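To make the rule above concrete, a short hypothetical switch that ends every case with one of the allowed terminators might look like this (the event and counter identifiers are made up)::

  switch (event) {
  case EVENT_START:
          counter = 0;
          fallthrough;    /* deliberate: a start event also counts as a tick */
  case EVENT_TICK:
          counter++;
          break;
  default:
          return -EINVAL;
  }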
diff --git a/Documentation/usb/rio.rst b/Documentation/usb/rio.rst
deleted file mode 100644 (file)
index ea73475..0000000
+++ /dev/null
@@ -1,109 +0,0 @@
-============
-Diamonds Rio
-============
-
-Copyright (C) 1999, 2000 Bruce Tenison
-
-Portions Copyright (C) 1999, 2000 David Nelson
-
-Thanks to David Nelson for guidance and the usage of the scanner.txt
-and scanner.c files to model our driver and this informative file.
-
-Mar. 2, 2000
-
-Changes
-=======
-
-- Initial Revision
-
-
-Overview
-========
-
-This README will address issues regarding how to configure the kernel
-to access a RIO 500 mp3 player.
-Before I explain how to use this to access the Rio500 please be warned:
-
-.. warning::
-
-   Please note that this software is still under development.  The authors
-   are in no way responsible for any damage that may occur, no matter how
-   inconsequential.
-
-It seems that the Rio has a problem when sending .mp3 with low batteries.
-I suggest when the batteries are low and you want to transfer stuff that you
-replace it with a fresh one. In my case, what happened is I lost two 16kb
-blocks (they are no longer usable to store information to it). But I don't
-know if that's normal or not; it could simply be a problem with the flash
-memory.
-
-In an extreme case, I left my Rio playing overnight and the batteries wore
-down to nothing and appear to have corrupted the flash memory. My RIO
-needed to be replaced as a result.  Diamond tech support is aware of the
-problem.  Do NOT allow your batteries to wear down to nothing before
-changing them.  It appears RIO 500 firmware does not handle low battery
-power well at all.
-
-On systems with OHCI controllers, the kernel OHCI code appears to have
-power on problems with some chipsets.  If you are having problems
-connecting to your RIO 500, try turning it on first and then plugging it
-into the USB cable.
-
-Contact Information
--------------------
-
-   The main page for the project is hosted at sourceforge.net in the following
-   URL: <http://rio500.sourceforge.net>. You can also go to the project's
-   sourceforge home page at: <http://sourceforge.net/projects/rio500/>.
-   There is also a mailing list: rio500-users@lists.sourceforge.net
-
-Authors
--------
-
-Most of the code was written by Cesar Miquel <miquel@df.uba.ar>. Keith
-Clayton <kclayton@jps.net> is incharge of the PPC port and making sure
-things work there. Bruce Tenison <btenison@dibbs.net> is adding support
-for .fon files and also does testing. The program will mostly sure be
-re-written and Pete Ikusz along with the rest will re-design it. I would
-also like to thank Tri Nguyen <tmn_3022000@hotmail.com> who provided use
-with some important information regarding the communication with the Rio.
-
-Additional Information and userspace tools
-
-       http://rio500.sourceforge.net/
-
-
-Requirements
-============
-
-A host with a USB port running a Linux kernel with RIO 500 support enabled.
-
-The driver is a module called rio500, which should be automatically loaded
-as you plug in your device. If that fails you can manually load it with
-
-  modprobe rio500
-
-Udev should automatically create a device node as soon as plug in your device.
-If that fails, you can manually add a device for the USB rio500::
-
-  mknod /dev/usb/rio500 c 180 64
-
-In that case, set appropriate permissions for /dev/usb/rio500 (don't forget
-about group and world permissions).  Both read and write permissions are
-required for proper operation.
-
-That's it.  The Rio500 Utils at: http://rio500.sourceforge.net should
-be able to access the rio500.
-
-Limits
-======
-
-You can use only a single rio500 device at a time with your computer.
-
-Bugs
-====
-
-If you encounter any problems feel free to drop me an email.
-
-Bruce Tenison
-btenison@dibbs.net
index 296de2b51c832ecc1869a77624c964ce398a5236..2a427d1e9f018c28ca6fbd76e294bfb6eeebddfc 100644 (file)
@@ -2165,12 +2165,10 @@ F:      arch/arm64/boot/dts/realtek/
 F:     Documentation/devicetree/bindings/arm/realtek.yaml
 
 ARM/RENESAS ARM64 ARCHITECTURE
-M:     Simon Horman <horms@verge.net.au>
 M:     Geert Uytterhoeven <geert+renesas@glider.be>
 M:     Magnus Damm <magnus.damm@gmail.com>
 L:     linux-renesas-soc@vger.kernel.org
 Q:     http://patchwork.kernel.org/project/linux-renesas-soc/list/
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/horms/renesas.git next
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/geert/renesas-devel.git next
 S:     Supported
 F:     arch/arm64/boot/dts/renesas/
@@ -2282,12 +2280,10 @@ S:      Maintained
 F:     drivers/media/platform/s5p-mfc/
 
 ARM/SHMOBILE ARM ARCHITECTURE
-M:     Simon Horman <horms@verge.net.au>
 M:     Geert Uytterhoeven <geert+renesas@glider.be>
 M:     Magnus Damm <magnus.damm@gmail.com>
 L:     linux-renesas-soc@vger.kernel.org
 Q:     http://patchwork.kernel.org/project/linux-renesas-soc/list/
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/horms/renesas.git next
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/geert/renesas-devel.git next
 S:     Supported
 F:     arch/arm/boot/dts/emev2*
@@ -2327,11 +2323,13 @@ F:      drivers/edac/altera_edac.
 
 ARM/SPREADTRUM SoC SUPPORT
 M:     Orson Zhai <orsonzhai@gmail.com>
-M:     Baolin Wang <baolin.wang@linaro.org>
+M:     Baolin Wang <baolin.wang7@gmail.com>
 M:     Chunyan Zhang <zhang.lyra@gmail.com>
 S:     Maintained
 F:     arch/arm64/boot/dts/sprd
 N:     sprd
+N:     sc27xx
+N:     sc2731
 
 ARM/STI ARCHITECTURE
 M:     Patrice Chotard <patrice.chotard@st.com>
@@ -3100,7 +3098,7 @@ S:        Supported
 F:     arch/arm64/net/
 
 BPF JIT for MIPS (32-BIT AND 64-BIT)
-M:     Paul Burton <paul.burton@mips.com>
+M:     Paul Burton <paulburton@kernel.org>
 L:     netdev@vger.kernel.org
 L:     bpf@vger.kernel.org
 S:     Maintained
@@ -3187,7 +3185,7 @@ N:        bcm216*
 N:     kona
 F:     arch/arm/mach-bcm/
 
-BROADCOM BCM2835 ARM ARCHITECTURE
+BROADCOM BCM2711/BCM2835 ARM ARCHITECTURE
 M:     Eric Anholt <eric@anholt.net>
 M:     Stefan Wahren <wahrenst@gmx.net>
 L:     bcm-kernel-feedback-list@broadcom.com
@@ -3195,6 +3193,7 @@ L:        linux-rpi-kernel@lists.infradead.org (moderated for non-subscribers)
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 T:     git git://github.com/anholt/linux
 S:     Maintained
+N:     bcm2711
 N:     bcm2835
 F:     drivers/staging/vc04_services
 
@@ -3241,8 +3240,6 @@ S:        Maintained
 F:     drivers/usb/gadget/udc/bcm63xx_udc.*
 
 BROADCOM BCM7XXX ARM ARCHITECTURE
-M:     Brian Norris <computersforpeace@gmail.com>
-M:     Gregory Fong <gregory.0xf0@gmail.com>
 M:     Florian Fainelli <f.fainelli@gmail.com>
 M:     bcm-kernel-feedback-list@broadcom.com
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -6112,7 +6109,10 @@ M:       Gao Xiang <gaoxiang25@huawei.com>
 M:     Chao Yu <yuchao0@huawei.com>
 L:     linux-erofs@lists.ozlabs.org
 S:     Maintained
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs.git
+F:     Documentation/filesystems/erofs.txt
 F:     fs/erofs/
+F:     include/trace/events/erofs.h
 
 ERRSEQ ERROR TRACKING INFRASTRUCTURE
 M:     Jeff Layton <jlayton@kernel.org>
@@ -8002,7 +8002,7 @@ S:        Maintained
 F:     drivers/usb/atm/ueagle-atm.c
 
 IMGTEC ASCII LCD DRIVER
-M:     Paul Burton <paul.burton@mips.com>
+M:     Paul Burton <paulburton@kernel.org>
 S:     Maintained
 F:     Documentation/devicetree/bindings/auxdisplay/img-ascii-lcd.txt
 F:     drivers/auxdisplay/img-ascii-lcd.c
@@ -9075,6 +9075,7 @@ F:        security/keys/
 KGDB / KDB /debug_core
 M:     Jason Wessel <jason.wessel@windriver.com>
 M:     Daniel Thompson <daniel.thompson@linaro.org>
+R:     Douglas Anderson <dianders@chromium.org>
 W:     http://kgdb.wiki.kernel.org/
 L:     kgdb-bugreport@lists.sourceforge.net
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/jwessel/kgdb.git
@@ -9122,7 +9123,7 @@ F:        drivers/auxdisplay/ks0108.c
 F:     include/linux/ks0108.h
 
 L3MDEV
-M:     David Ahern <dsa@cumulusnetworks.com>
+M:     David Ahern <dsahern@kernel.org>
 L:     netdev@vger.kernel.org
 S:     Maintained
 F:     net/l3mdev
@@ -9183,6 +9184,7 @@ M:        Pavel Machek <pavel@ucw.cz>
 R:     Dan Murphy <dmurphy@ti.com>
 L:     linux-leds@vger.kernel.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/j.anaszewski/linux-leds.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/pavel/linux-leds.git
 S:     Maintained
 F:     Documentation/devicetree/bindings/leds/
 F:     drivers/leds/
@@ -10254,7 +10256,7 @@ MEDIATEK ETHERNET DRIVER
 M:     Felix Fietkau <nbd@openwrt.org>
 M:     John Crispin <john@phrozen.org>
 M:     Sean Wang <sean.wang@mediatek.com>
-M:     Nelson Chang <nelson.chang@mediatek.com>
+M:     Mark Lee <Mark-MC.Lee@mediatek.com>
 L:     netdev@vger.kernel.org
 S:     Maintained
 F:     drivers/net/ethernet/mediatek/
@@ -10517,8 +10519,12 @@ F:     mm/memblock.c
 F:     Documentation/core-api/boot-time-mm.rst
 
 MEMORY MANAGEMENT
+M:     Andrew Morton <akpm@linux-foundation.org>
 L:     linux-mm@kvack.org
 W:     http://www.linux-mm.org
+T:     quilt https://ozlabs.org/~akpm/mmotm/
+T:     quilt https://ozlabs.org/~akpm/mmots/
+T:     git git://github.com/hnaz/linux-mm.git
 S:     Maintained
 F:     include/linux/mm.h
 F:     include/linux/gfp.h
@@ -10827,7 +10833,7 @@ F:      drivers/usb/image/microtek.*
 
 MIPS
 M:     Ralf Baechle <ralf@linux-mips.org>
-M:     Paul Burton <paul.burton@mips.com>
+M:     Paul Burton <paulburton@kernel.org>
 M:     James Hogan <jhogan@kernel.org>
 L:     linux-mips@vger.kernel.org
 W:     http://www.linux-mips.org/
@@ -10841,7 +10847,7 @@ F:      arch/mips/
 F:     drivers/platform/mips/
 
 MIPS BOSTON DEVELOPMENT BOARD
-M:     Paul Burton <paul.burton@mips.com>
+M:     Paul Burton <paulburton@kernel.org>
 L:     linux-mips@vger.kernel.org
 S:     Maintained
 F:     Documentation/devicetree/bindings/clock/img,boston-clock.txt
@@ -10851,7 +10857,7 @@ F:      drivers/clk/imgtec/clk-boston.c
 F:     include/dt-bindings/clock/boston-clock.h
 
 MIPS GENERIC PLATFORM
-M:     Paul Burton <paul.burton@mips.com>
+M:     Paul Burton <paulburton@kernel.org>
 L:     linux-mips@vger.kernel.org
 S:     Supported
 F:     Documentation/devicetree/bindings/power/mti,mips-cpc.txt
@@ -11406,7 +11412,6 @@ F:      include/trace/events/tcp.h
 NETWORKING [TLS]
 M:     Boris Pismenny <borisp@mellanox.com>
 M:     Aviad Yehezkel <aviadye@mellanox.com>
-M:     Dave Watson <davejwatson@fb.com>
 M:     John Fastabend <john.fastabend@gmail.com>
 M:     Daniel Borkmann <daniel@iogearbox.net>
 M:     Jakub Kicinski <jakub.kicinski@netronome.com>
@@ -11543,6 +11548,7 @@ NSDEPS
 M:     Matthias Maennich <maennich@google.com>
 S:     Maintained
 F:     scripts/nsdeps
+F:     Documentation/core-api/symbol-namespaces.rst
 
 NTB AMD DRIVER
 M:     Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
@@ -12310,12 +12316,15 @@ F:    arch/parisc/
 F:     Documentation/parisc/
 F:     drivers/parisc/
 F:     drivers/char/agp/parisc-agp.c
+F:     drivers/input/misc/hp_sdc_rtc.c
 F:     drivers/input/serio/gscps2.c
+F:     drivers/input/serio/hp_sdc*
 F:     drivers/parport/parport_gsc.*
 F:     drivers/tty/serial/8250/8250_gsc.c
 F:     drivers/video/fbdev/sti*
 F:     drivers/video/console/sti*
 F:     drivers/video/logo/logo_parisc*
+F:     include/linux/hp_sdc.h
 
 PARMAN
 M:     Jiri Pirko <jiri@mellanox.com>
@@ -13359,7 +13368,7 @@ S:      Maintained
 F:     drivers/scsi/qla1280.[ch]
 
 QLOGIC QLA2XXX FC-SCSI DRIVER
-M:     qla2xxx-upstream@qlogic.com
+M:     hmadhani@marvell.com
 L:     linux-scsi@vger.kernel.org
 S:     Supported
 F:     Documentation/scsi/LICENSE.qla2xxx
@@ -13900,7 +13909,7 @@ F:      drivers/mtd/nand/raw/r852.h
 
 RISC-V ARCHITECTURE
 M:     Paul Walmsley <paul.walmsley@sifive.com>
-M:     Palmer Dabbelt <palmer@sifive.com>
+M:     Palmer Dabbelt <palmer@dabbelt.com>
 M:     Albert Ou <aou@eecs.berkeley.edu>
 L:     linux-riscv@lists.infradead.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux.git
@@ -14777,7 +14786,7 @@ F:      drivers/media/usb/siano/
 F:     drivers/media/mmc/siano/
 
 SIFIVE DRIVERS
-M:     Palmer Dabbelt <palmer@sifive.com>
+M:     Palmer Dabbelt <palmer@dabbelt.com>
 M:     Paul Walmsley <paul.walmsley@sifive.com>
 L:     linux-riscv@lists.infradead.org
 T:     git git://github.com/sifive/riscv-linux.git
@@ -14787,7 +14796,7 @@ N:      sifive
 
 SIFIVE FU540 SYSTEM-ON-CHIP
 M:     Paul Walmsley <paul.walmsley@sifive.com>
-M:     Palmer Dabbelt <palmer@sifive.com>
+M:     Palmer Dabbelt <palmer@dabbelt.com>
 L:     linux-riscv@lists.infradead.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/pjw/sifive.git
 S:     Supported
@@ -16762,13 +16771,6 @@ W:     http://www.linux-usb.org/usbnet
 S:     Maintained
 F:     drivers/net/usb/dm9601.c
 
-USB DIAMOND RIO500 DRIVER
-M:     Cesar Miquel <miquel@df.uba.ar>
-L:     rio500-users@lists.sourceforge.net
-W:     http://rio500.sourceforge.net
-S:     Maintained
-F:     drivers/usb/misc/rio500*
-
 USB EHCI DRIVER
 M:     Alan Stern <stern@rowland.harvard.edu>
 L:     linux-usb@vger.kernel.org
@@ -17435,7 +17437,7 @@ F:      include/linux/regulator/
 K:     regulator_get_optional
 
 VRF
-M:     David Ahern <dsa@cumulusnetworks.com>
+M:     David Ahern <dsahern@kernel.org>
 M:     Shrijeet Mukherjee <shrijeet@gmail.com>
 L:     netdev@vger.kernel.org
 S:     Maintained
@@ -18036,6 +18038,7 @@ F:      Documentation/vm/zsmalloc.rst
 ZSWAP COMPRESSED SWAP CACHING
 M:     Seth Jennings <sjenning@redhat.com>
 M:     Dan Streetman <ddstreet@ieee.org>
+M:     Vitaly Wool <vitaly.wool@konsulko.com>
 L:     linux-mm@kvack.org
 S:     Maintained
 F:     mm/zswap.c
index 6f54f2f9574311301a8c59b2e8565dd5bebeb197..b37d0e8fc61d5422eb03b7a00ad6ee991b66f103 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -2,8 +2,8 @@
 VERSION = 5
 PATCHLEVEL = 4
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
-NAME = Bobtail Squid
+EXTRAVERSION = -rc6
+NAME = Kleptomaniac Octopus
 
 # *DOCUMENTATION*
 # To see a list of typical targets execute "make help"
@@ -206,24 +206,8 @@ ifndef KBUILD_CHECKSRC
   KBUILD_CHECKSRC = 0
 endif
 
-# Use make M=dir to specify directory of external module to build
-# Old syntax make ... SUBDIRS=$PWD is still supported
-# Setting the environment variable KBUILD_EXTMOD take precedence
-ifdef SUBDIRS
-  $(warning ================= WARNING ================)
-  $(warning 'SUBDIRS' will be removed after Linux 5.3)
-  $(warning )
-  $(warning If you are building an individual subdirectory)
-  $(warning in the kernel tree, you can do like this:)
-  $(warning $$ make path/to/dir/you/want/to/build/)
-  $(warning (Do not forget the trailing slash))
-  $(warning )
-  $(warning If you are building an external module,)
-  $(warning Please use 'M=' or 'KBUILD_EXTMOD' instead)
-  $(warning ==========================================)
-  KBUILD_EXTMOD ?= $(SUBDIRS)
-endif
-
+# Use make M=dir or set the environment variable KBUILD_EXTMOD to specify the
+# directory of the external module to build. Setting M= takes precedence.
 ifeq ("$(origin M)", "command line")
   KBUILD_EXTMOD := $(M)
 endif
@@ -498,7 +482,6 @@ export CFLAGS_KASAN CFLAGS_KASAN_NOSANITIZE CFLAGS_UBSAN
 export KBUILD_AFLAGS AFLAGS_KERNEL AFLAGS_MODULE
 export KBUILD_AFLAGS_MODULE KBUILD_CFLAGS_MODULE KBUILD_LDFLAGS_MODULE
 export KBUILD_AFLAGS_KERNEL KBUILD_CFLAGS_KERNEL
-export KBUILD_ARFLAGS
 
 # Files to ignore in find ... statements
 
@@ -616,7 +599,7 @@ endif
 # in addition to whatever we do anyway.
 # Just "make" or "make all" shall build modules as well
 
-ifneq ($(filter all _all modules,$(MAKECMDGOALS)),)
+ifneq ($(filter all _all modules nsdeps,$(MAKECMDGOALS)),)
   KBUILD_MODULES := 1
 endif
 
@@ -914,9 +897,6 @@ ifdef CONFIG_RETPOLINE
 KBUILD_CFLAGS += $(call cc-option,-fcf-protection=none)
 endif
 
-# use the deterministic mode of AR if available
-KBUILD_ARFLAGS := $(call ar-option,D)
-
 include scripts/Makefile.kasan
 include scripts/Makefile.extrawarn
 include scripts/Makefile.ubsan
@@ -1057,7 +1037,7 @@ export KBUILD_VMLINUX_OBJS := $(head-y) $(init-y) $(core-y) $(libs-y2) \
 export KBUILD_VMLINUX_LIBS := $(libs-y1)
 export KBUILD_LDS          := arch/$(SRCARCH)/kernel/vmlinux.lds
 export LDFLAGS_vmlinux
-# used by scripts/package/Makefile
+# used by scripts/Makefile.package
 export KBUILD_ALLDIRS := $(sort $(filter-out arch/%,$(vmlinux-alldirs)) LICENSES arch include scripts tools)
 
 vmlinux-deps := $(KBUILD_LDS) $(KBUILD_VMLINUX_OBJS) $(KBUILD_VMLINUX_LIBS)
@@ -1237,9 +1217,8 @@ PHONY += kselftest
 kselftest:
        $(Q)$(MAKE) -C $(srctree)/tools/testing/selftests run_tests
 
-PHONY += kselftest-clean
-kselftest-clean:
-       $(Q)$(MAKE) -C $(srctree)/tools/testing/selftests clean
+kselftest-%: FORCE
+       $(Q)$(MAKE) -C $(srctree)/tools/testing/selftests $*
 
 PHONY += kselftest-merge
 kselftest-merge:
index bfc7f5f5d6f26907121ddc20c4782a5d73f50265..9acbeba832c0b532ca1c4a2623f215335edcd481 100644 (file)
                clock-frequency = <33333333>;
        };
 
+       reg_5v0: regulator-5v0 {
+               compatible = "regulator-fixed";
+
+               regulator-name = "5v0-supply";
+               regulator-min-microvolt = <5000000>;
+               regulator-max-microvolt = <5000000>;
+       };
+
        cpu_intc: cpu-interrupt-controller {
                compatible = "snps,archs-intc";
                interrupt-controller;
                        clocks = <&input_clk>;
                        cs-gpios = <&creg_gpio 0 GPIO_ACTIVE_LOW>,
                                   <&creg_gpio 1 GPIO_ACTIVE_LOW>;
+
+                       spi-flash@0 {
+                               compatible = "sst26wf016b", "jedec,spi-nor";
+                               reg = <0>;
+                               #address-cells = <1>;
+                               #size-cells = <1>;
+                               spi-max-frequency = <4000000>;
+                       };
+
+                       adc@1 {
+                               compatible = "ti,adc108s102";
+                               reg = <1>;
+                               vref-supply = <&reg_5v0>;
+                               spi-max-frequency = <1000000>;
+                       };
                };
 
                creg_gpio: gpio@14b0 {
index 9b9a74444ce27455dd5f403e476de3b78d771a23..0974226fab550ff384d59c8514fcb79139e7c1d4 100644 (file)
@@ -32,6 +32,8 @@ CONFIG_INET=y
 CONFIG_DEVTMPFS=y
 # CONFIG_STANDALONE is not set
 # CONFIG_PREVENT_FIRMWARE_BUILD is not set
+CONFIG_MTD=y
+CONFIG_MTD_SPI_NOR=y
 CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
 CONFIG_NETDEVICES=y
@@ -55,6 +57,8 @@ CONFIG_GPIO_SYSFS=y
 CONFIG_GPIO_DWAPB=y
 CONFIG_GPIO_SNPS_CREG=y
 # CONFIG_HWMON is not set
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
 CONFIG_DRM=y
 # CONFIG_DRM_FBDEV_EMULATION is not set
 CONFIG_DRM_UDL=y
@@ -72,6 +76,8 @@ CONFIG_MMC_SDHCI_PLTFM=y
 CONFIG_MMC_DW=y
 CONFIG_DMADEVICES=y
 CONFIG_DW_AXI_DMAC=y
+CONFIG_IIO=y
+CONFIG_TI_ADC108S102=y
 CONFIG_EXT3_FS=y
 CONFIG_VFAT_FS=y
 CONFIG_TMPFS=y
index 861a8aea51f9fe0c086665dd84677f7ee80ea838..661fd842ea97db7ad4f3871b2a1038d715a44130 100644 (file)
@@ -614,8 +614,8 @@ static int arc_pmu_device_probe(struct platform_device *pdev)
        /* loop thru all available h/w condition indexes */
        for (i = 0; i < cc_bcr.c; i++) {
                write_aux_reg(ARC_REG_CC_INDEX, i);
-               cc_name.indiv.word0 = read_aux_reg(ARC_REG_CC_NAME0);
-               cc_name.indiv.word1 = read_aux_reg(ARC_REG_CC_NAME1);
+               cc_name.indiv.word0 = le32_to_cpu(read_aux_reg(ARC_REG_CC_NAME0));
+               cc_name.indiv.word1 = le32_to_cpu(read_aux_reg(ARC_REG_CC_NAME1));
 
                arc_pmu_map_hw_event(i, cc_name.str);
                arc_pmu_add_raw_event_attr(i, cc_name.str);
index 18f70b35da4c7da531876b940b71fb18e171ff3e..204bccfcc110ab24c484941744893e3b563422db 100644 (file)
        pinctrl-0 = <&mmc0_pins_default>;
 };
 
-&gpio0 {
+&gpio0_target {
        /* Do not idle the GPIO used for holding the VTT regulator */
        ti,no-reset-on-init;
        ti,no-idle-on-init;
index 9915c891e05fa5e5c9d76149f5f90f5c4cc54578..7a9eb2b0d45b012f6592e9227d55158899f5f2e7 100644 (file)
                        ranges = <0x0 0x5000 0x1000>;
                };
 
-               target-module@7000 {                    /* 0x44e07000, ap 14 20.0 */
+               gpio0_target: target-module@7000 {      /* 0x44e07000, ap 14 20.0 */
                        compatible = "ti,sysc-omap2", "ti,sysc";
                        ti,hwmods = "gpio1";
                        reg = <0x7000 0x4>,
                        reg = <0xe000 0x4>,
                              <0xe054 0x4>;
                        reg-names = "rev", "sysc";
-                       ti,sysc-midle ;
+                       ti,sysc-midle = <SYSC_IDLE_FORCE>,
+                                       <SYSC_IDLE_NO>,
+                                       <SYSC_IDLE_SMART>;
                        ti,sysc-sidle = <SYSC_IDLE_FORCE>,
                                        <SYSC_IDLE_NO>,
                                        <SYSC_IDLE_SMART>;
index 883fb85135d4645ad449b4581080f1334942702f..1b4b2b0500e4c0214d8c057559dd3003bfedf13f 100644 (file)
                reg = <0x70>;
                #address-cells = <1>;
                #size-cells = <0>;
+               i2c-mux-idle-disconnect;
 
                i2c@0 {
                        /* FMC A */
                        #address-cells = <1>;
                        #size-cells = <0>;
                        reg = <0>;
-                       i2c-mux-idle-disconnect;
                };
 
                i2c@1 {
                        #address-cells = <1>;
                        #size-cells = <0>;
                        reg = <1>;
-                       i2c-mux-idle-disconnect;
                };
 
                i2c@2 {
                        #address-cells = <1>;
                        #size-cells = <0>;
                        reg = <2>;
-                       i2c-mux-idle-disconnect;
                };
 
                i2c@3 {
                        #address-cells = <1>;
                        #size-cells = <0>;
                        reg = <3>;
-                       i2c-mux-idle-disconnect;
                };
 
                i2c@4 {
                        #address-cells = <1>;
                        #size-cells = <0>;
                        reg = <4>;
-                       i2c-mux-idle-disconnect;
                };
 
                i2c@5 {
                        #address-cells = <1>;
                        #size-cells = <0>;
                        reg = <5>;
-                       i2c-mux-idle-disconnect;
 
                        ina230@40 { compatible = "ti,ina230"; reg = <0x40>; shunt-resistor = <5000>; };
                        ina230@41 { compatible = "ti,ina230"; reg = <0x41>; shunt-resistor = <5000>; };
                        #address-cells = <1>;
                        #size-cells = <0>;
                        reg = <6>;
-                       i2c-mux-idle-disconnect;
                };
 
                i2c@7 {
                        #address-cells = <1>;
                        #size-cells = <0>;
                        reg = <7>;
-                       i2c-mux-idle-disconnect;
 
                        u41: pca9575@20 {
                                compatible = "nxp,pca9575";
index 848e2a8884e2cdd0a7430a65c87576eca048f5ac..14bbc438055fd445092c177f85afb28262c1a7d4 100644 (file)
                                ti,hwmods = "dss_dispc";
                                clocks = <&disp_clk>;
                                clock-names = "fck";
+
+                               max-memory-bandwidth = <230000000>;
                        };
 
                        rfbi: rfbi@4832a800 {
index 09a088f98566178bbb4d5df30f44d03ca7280357..b75af21069f95d4b2bd4187d08cb2e974cbf6e5c 100644 (file)
        #address-cells = <1>;
        #size-cells = <0>;
        pinctrl-0 = <&emmc_gpio34 &gpclk2_gpio43>;
+       bus-width = <4>;
        mmc-pwrseq = <&wifi_pwrseq>;
        non-removable;
        status = "okay";
index 7c3cb7ece6cbf87c2f2e668fb7287968f95a44ee..925cb37c22f06b4f18e73f54b28bca45338c8b51 100644 (file)
@@ -9,6 +9,14 @@
                reg = <0 0x40000000>;
        };
 
+       leds {
+               /*
+                * Since there is no upstream GPIO driver yet,
+                * remove the incomplete node.
+                */
+               /delete-node/ act;
+       };
+
        reg_3v3: fixed-regulator {
                compatible = "regulator-fixed";
                regulator-name = "3V3";
index ea0e7c19eb4e3e3cb4fcdeb790229bcda971f928..5cac2dd58241fb33aff9356c8b0f6e7097ad860f 100644 (file)
                                interrupt-names = "tx", "rx";
                                dmas = <&edma_xbar 129 1>, <&edma_xbar 128 1>;
                                dma-names = "tx", "rx";
-                               clocks = <&ipu_clkctrl DRA7_IPU_MCASP1_CLKCTRL 22>,
+                               clocks = <&ipu_clkctrl DRA7_IPU_MCASP1_CLKCTRL 0>,
                                         <&ipu_clkctrl DRA7_IPU_MCASP1_CLKCTRL 24>,
                                         <&ipu_clkctrl DRA7_IPU_MCASP1_CLKCTRL 28>;
                                clock-names = "fck", "ahclkx", "ahclkr";
                                interrupt-names = "tx", "rx";
                                dmas = <&edma_xbar 131 1>, <&edma_xbar 130 1>;
                                dma-names = "tx", "rx";
-                               clocks = <&l4per2_clkctrl DRA7_L4PER2_MCASP2_CLKCTRL 22>,
-                                        <&l4per2_clkctrl DRA7_L4PER2_MCASP2_CLKCTRL 24>,
+                               clocks = <&l4per2_clkctrl DRA7_L4PER2_MCASP2_CLKCTRL 0>,
+                                        <&ipu_clkctrl DRA7_IPU_MCASP1_CLKCTRL 24>,
                                         <&l4per2_clkctrl DRA7_L4PER2_MCASP2_CLKCTRL 28>;
                                clock-names = "fck", "ahclkx", "ahclkr";
                                status = "disabled";
                                        <SYSC_IDLE_SMART>;
                        /* Domains (P, C): l4per_pwrdm, l4per2_clkdm */
                        clocks = <&l4per2_clkctrl DRA7_L4PER2_MCASP3_CLKCTRL 0>,
-                                <&l4per2_clkctrl DRA7_L4PER2_MCASP3_CLKCTRL 24>,
-                                <&l4per2_clkctrl DRA7_L4PER2_MCASP3_CLKCTRL 28>;
-                       clock-names = "fck", "ahclkx", "ahclkr";
+                                <&l4per2_clkctrl DRA7_L4PER2_MCASP3_CLKCTRL 24>;
+                       clock-names = "fck", "ahclkx";
                        #address-cells = <1>;
                        #size-cells = <1>;
                        ranges = <0x0 0x68000 0x2000>,
                                interrupt-names = "tx", "rx";
                                dmas = <&edma_xbar 133 1>, <&edma_xbar 132 1>;
                                dma-names = "tx", "rx";
-                               clocks = <&l4per2_clkctrl DRA7_L4PER2_MCASP3_CLKCTRL 22>,
+                               clocks = <&l4per2_clkctrl DRA7_L4PER2_MCASP3_CLKCTRL 0>,
                                         <&l4per2_clkctrl DRA7_L4PER2_MCASP3_CLKCTRL 24>;
                                clock-names = "fck", "ahclkx";
                                status = "disabled";
                                        <SYSC_IDLE_SMART>;
                        /* Domains (P, C): l4per_pwrdm, l4per2_clkdm */
                        clocks = <&l4per2_clkctrl DRA7_L4PER2_MCASP4_CLKCTRL 0>,
-                                <&l4per2_clkctrl DRA7_L4PER2_MCASP4_CLKCTRL 24>,
-                                <&l4per2_clkctrl DRA7_L4PER2_MCASP4_CLKCTRL 28>;
-                       clock-names = "fck", "ahclkx", "ahclkr";
+                                <&l4per2_clkctrl DRA7_L4PER2_MCASP4_CLKCTRL 24>;
+                       clock-names = "fck", "ahclkx";
                        #address-cells = <1>;
                        #size-cells = <1>;
                        ranges = <0x0 0x6c000 0x2000>,
                                interrupt-names = "tx", "rx";
                                dmas = <&edma_xbar 135 1>, <&edma_xbar 134 1>;
                                dma-names = "tx", "rx";
-                               clocks = <&l4per2_clkctrl DRA7_L4PER2_MCASP4_CLKCTRL 22>,
+                               clocks = <&l4per2_clkctrl DRA7_L4PER2_MCASP4_CLKCTRL 0>,
                                         <&l4per2_clkctrl DRA7_L4PER2_MCASP4_CLKCTRL 24>;
                                clock-names = "fck", "ahclkx";
                                status = "disabled";
                                        <SYSC_IDLE_SMART>;
                        /* Domains (P, C): l4per_pwrdm, l4per2_clkdm */
                        clocks = <&l4per2_clkctrl DRA7_L4PER2_MCASP5_CLKCTRL 0>,
-                                <&l4per2_clkctrl DRA7_L4PER2_MCASP5_CLKCTRL 24>,
-                                <&l4per2_clkctrl DRA7_L4PER2_MCASP5_CLKCTRL 28>;
-                       clock-names = "fck", "ahclkx", "ahclkr";
+                                <&l4per2_clkctrl DRA7_L4PER2_MCASP5_CLKCTRL 24>;
+                       clock-names = "fck", "ahclkx";
                        #address-cells = <1>;
                        #size-cells = <1>;
                        ranges = <0x0 0x70000 0x2000>,
                                interrupt-names = "tx", "rx";
                                dmas = <&edma_xbar 137 1>, <&edma_xbar 136 1>;
                                dma-names = "tx", "rx";
-                               clocks = <&l4per2_clkctrl DRA7_L4PER2_MCASP5_CLKCTRL 22>,
+                               clocks = <&l4per2_clkctrl DRA7_L4PER2_MCASP5_CLKCTRL 0>,
                                         <&l4per2_clkctrl DRA7_L4PER2_MCASP5_CLKCTRL 24>;
                                clock-names = "fck", "ahclkx";
                                status = "disabled";
                                        <SYSC_IDLE_SMART>;
                        /* Domains (P, C): l4per_pwrdm, l4per2_clkdm */
                        clocks = <&l4per2_clkctrl DRA7_L4PER2_MCASP6_CLKCTRL 0>,
-                                <&l4per2_clkctrl DRA7_L4PER2_MCASP6_CLKCTRL 24>,
-                                <&l4per2_clkctrl DRA7_L4PER2_MCASP6_CLKCTRL 28>;
-                       clock-names = "fck", "ahclkx", "ahclkr";
+                                <&l4per2_clkctrl DRA7_L4PER2_MCASP6_CLKCTRL 24>;
+                       clock-names = "fck", "ahclkx";
                        #address-cells = <1>;
                        #size-cells = <1>;
                        ranges = <0x0 0x74000 0x2000>,
                                interrupt-names = "tx", "rx";
                                dmas = <&edma_xbar 139 1>, <&edma_xbar 138 1>;
                                dma-names = "tx", "rx";
-                               clocks = <&l4per2_clkctrl DRA7_L4PER2_MCASP6_CLKCTRL 22>,
+                               clocks = <&l4per2_clkctrl DRA7_L4PER2_MCASP6_CLKCTRL 0>,
                                         <&l4per2_clkctrl DRA7_L4PER2_MCASP6_CLKCTRL 24>;
                                clock-names = "fck", "ahclkx";
                                status = "disabled";
                                        <SYSC_IDLE_SMART>;
                        /* Domains (P, C): l4per_pwrdm, l4per2_clkdm */
                        clocks = <&l4per2_clkctrl DRA7_L4PER2_MCASP7_CLKCTRL 0>,
-                                <&l4per2_clkctrl DRA7_L4PER2_MCASP7_CLKCTRL 24>,
-                                <&l4per2_clkctrl DRA7_L4PER2_MCASP7_CLKCTRL 28>;
-                       clock-names = "fck", "ahclkx", "ahclkr";
+                                <&l4per2_clkctrl DRA7_L4PER2_MCASP7_CLKCTRL 24>;
+                       clock-names = "fck", "ahclkx";
                        #address-cells = <1>;
                        #size-cells = <1>;
                        ranges = <0x0 0x78000 0x2000>,
                                interrupt-names = "tx", "rx";
                                dmas = <&edma_xbar 141 1>, <&edma_xbar 140 1>;
                                dma-names = "tx", "rx";
-                               clocks = <&l4per2_clkctrl DRA7_L4PER2_MCASP7_CLKCTRL 22>,
+                               clocks = <&l4per2_clkctrl DRA7_L4PER2_MCASP7_CLKCTRL 0>,
                                         <&l4per2_clkctrl DRA7_L4PER2_MCASP7_CLKCTRL 24>;
                                clock-names = "fck", "ahclkx";
                                status = "disabled";
                                        <SYSC_IDLE_SMART>;
                        /* Domains (P, C): l4per_pwrdm, l4per2_clkdm */
                        clocks = <&l4per2_clkctrl DRA7_L4PER2_MCASP8_CLKCTRL 0>,
-                                <&l4per2_clkctrl DRA7_L4PER2_MCASP8_CLKCTRL 24>,
-                                <&l4per2_clkctrl DRA7_L4PER2_MCASP8_CLKCTRL 28>;
-                       clock-names = "fck", "ahclkx", "ahclkr";
+                                <&l4per2_clkctrl DRA7_L4PER2_MCASP8_CLKCTRL 24>;
+                       clock-names = "fck", "ahclkx";
                        #address-cells = <1>;
                        #size-cells = <1>;
                        ranges = <0x0 0x7c000 0x2000>,
                                interrupt-names = "tx", "rx";
                                dmas = <&edma_xbar 143 1>, <&edma_xbar 142 1>;
                                dma-names = "tx", "rx";
-                               clocks = <&l4per2_clkctrl DRA7_L4PER2_MCASP8_CLKCTRL 22>,
+                               clocks = <&l4per2_clkctrl DRA7_L4PER2_MCASP8_CLKCTRL 0>,
                                         <&l4per2_clkctrl DRA7_L4PER2_MCASP8_CLKCTRL 24>;
                                clock-names = "fck", "ahclkx";
                                status = "disabled";
index 7ceae357324860c9ff79bf41c5deb34d6fff3cc1..547fb141ec0c9f4f2aace5f2095bfbd2d921d2dd 100644 (file)
        vin-supply = <&sw1c_reg>;
 };
 
+&snvs_poweroff {
+       status = "okay";
+};
+
 &iomuxc {
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_hog>;
index 710f850e785c6cbb465faa1697e15a52de378171..e2e604d6ba0b452d328a9488e956fffddd543c01 100644 (file)
                                compatible = "fsl,imx7d-gpt", "fsl,imx6sx-gpt";
                                reg = <0x302d0000 0x10000>;
                                interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>;
-                               clocks = <&clks IMX7D_CLK_DUMMY>,
+                               clocks = <&clks IMX7D_GPT1_ROOT_CLK>,
                                         <&clks IMX7D_GPT1_ROOT_CLK>;
                                clock-names = "ipg", "per";
                        };
                                compatible = "fsl,imx7d-gpt", "fsl,imx6sx-gpt";
                                reg = <0x302e0000 0x10000>;
                                interrupts = <GIC_SPI 54 IRQ_TYPE_LEVEL_HIGH>;
-                               clocks = <&clks IMX7D_CLK_DUMMY>,
+                               clocks = <&clks IMX7D_GPT2_ROOT_CLK>,
                                         <&clks IMX7D_GPT2_ROOT_CLK>;
                                clock-names = "ipg", "per";
                                status = "disabled";
                                compatible = "fsl,imx7d-gpt", "fsl,imx6sx-gpt";
                                reg = <0x302f0000 0x10000>;
                                interrupts = <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>;
-                               clocks = <&clks IMX7D_CLK_DUMMY>,
+                               clocks = <&clks IMX7D_GPT3_ROOT_CLK>,
                                         <&clks IMX7D_GPT3_ROOT_CLK>;
                                clock-names = "ipg", "per";
                                status = "disabled";
                                compatible = "fsl,imx7d-gpt", "fsl,imx6sx-gpt";
                                reg = <0x30300000 0x10000>;
                                interrupts = <GIC_SPI 52 IRQ_TYPE_LEVEL_HIGH>;
-                               clocks = <&clks IMX7D_CLK_DUMMY>,
+                               clocks = <&clks IMX7D_GPT4_ROOT_CLK>,
                                         <&clks IMX7D_GPT4_ROOT_CLK>;
                                clock-names = "ipg", "per";
                                status = "disabled";
index 3fdd0a72f87f795b43d049a262ae5e3d7df2b967..506b118e511a6705007b4fe3decdcb8656eb65f2 100644 (file)
 &twl_gpio {
        ti,use-leds;
 };
+
+&twl_keypad {
+       status = "disabled";
+};
index 3621b7d2b22ac28eb905e5aa58e9ac0918c42eb5..9980c10c6e295f5e22dfaa3af94dd2b8c09f4f46 100644 (file)
        pinctrl-1 = <&ephy_leds_pins>;
        status = "okay";
 
+       gmac0: mac@0 {
+               compatible = "mediatek,eth-mac";
+               reg = <0>;
+               phy-mode = "2500base-x";
+               fixed-link {
+                       speed = <2500>;
+                       full-duplex;
+                       pause;
+               };
+       };
+
        gmac1: mac@1 {
                compatible = "mediatek,eth-mac";
                reg = <1>;
+               phy-mode = "gmii";
                phy-handle = <&phy0>;
        };
 
@@ -78,7 +90,6 @@
 
                phy0: ethernet-phy@0 {
                        reg = <0>;
-                       phy-mode = "gmii";
                };
        };
 };
index 9608bc2ccb3f9367f0bf5172564f0dd761555c26..867b88103b9d6dc70cca6a274dbcd1e6ad5f9751 100644 (file)
                        compatible = "mediatek,mt7629-sgmiisys", "syscon";
                        reg = <0x1b128000 0x3000>;
                        #clock-cells = <1>;
-                       mediatek,physpeed = "2500";
                };
 
                sgmiisys1: syscon@1b130000 {
                        compatible = "mediatek,mt7629-sgmiisys", "syscon";
                        reg = <0x1b130000 0x3000>;
                        #clock-cells = <1>;
-                       mediatek,physpeed = "2500";
                };
        };
 };
index d01fc8744fd79246fe068f2886b04dca2c75833a..b6ef1a7ac8a4ee6c2b7dcf01fda66b600dc60f91 100644 (file)
                        spi-max-frequency = <100000>;
                        spi-cpol;
                        spi-cpha;
+                       spi-cs-high;
 
                        backlight= <&backlight>;
                        label = "lcd";
index 4454449de00c0a784ad2b1a55e5238408ed32902..a40fe8d49da6469a573b43d1b0fa8e7d3ae08ada 100644 (file)
                compatible = "ti,wl1285", "ti,wl1283";
                reg = <2>;
                /* gpio_100 with gpmc_wait2 pad as wakeirq */
-               interrupts-extended = <&gpio4 4 IRQ_TYPE_EDGE_RISING>,
+               interrupts-extended = <&gpio4 4 IRQ_TYPE_LEVEL_HIGH>,
                                      <&omap4_pmx_core 0x4e>;
                interrupt-names = "irq", "wakeup";
                ref-clock-frequency = <26000000>;
index 14be2ecb62b1f311aba87ad3e2dfd2b1b39ab834..55ea8b6189af56aa6956fc24306b90339a2fcb9a 100644 (file)
                compatible = "ti,wl1271";
                reg = <2>;
                /* gpio_53 with gpmc_ncs3 pad as wakeup */
-               interrupts-extended = <&gpio2 21 IRQ_TYPE_EDGE_RISING>,
+               interrupts-extended = <&gpio2 21 IRQ_TYPE_LEVEL_HIGH>,
                                      <&omap4_pmx_core 0x3a>;
                interrupt-names = "irq", "wakeup";
                ref-clock-frequency = <38400000>;
index 3c274965ff40a24ab6aa3439c6b7f344b6d37757..91480ac1f32867bda4a26f0f61d81fe36b5ff29f 100644 (file)
                compatible = "ti,wl1281";
                reg = <2>;
                interrupt-parent = <&gpio1>;
-               interrupts = <21 IRQ_TYPE_EDGE_RISING>; /* gpio 53 */
+               interrupts = <21 IRQ_TYPE_LEVEL_HIGH>; /* gpio 53 */
                ref-clock-frequency = <26000000>;
                tcxo-clock-frequency = <26000000>;
        };
index 6dbbc9b3229cc0eec363a96754cddf371638138c..d0032213101e652dd4b7ce54615178ce5bf44e98 100644 (file)
@@ -69,7 +69,7 @@
                compatible = "ti,wl1271";
                reg = <2>;
                interrupt-parent = <&gpio2>;
-               interrupts = <9 IRQ_TYPE_EDGE_RISING>; /* gpio 41 */
+               interrupts = <9 IRQ_TYPE_LEVEL_HIGH>; /* gpio 41 */
                ref-clock-frequency = <38400000>;
        };
 };
index 7fff555ee39435435ed1f023648b8b36a6af4dd1..68ac04641bdb10ef8a5af855cba5234fecb33a7c 100644 (file)
                pinctrl-names = "default";
                pinctrl-0 = <&wlcore_irq_pin>;
                interrupt-parent = <&gpio1>;
-               interrupts = <14 IRQ_TYPE_EDGE_RISING>; /* gpio 14 */
+               interrupts = <14 IRQ_TYPE_LEVEL_HIGH>;  /* gpio 14 */
                ref-clock-frequency = <26000000>;
        };
 };
index fac2e57dcca905e6dd8af4bebcb57d89aa727805..4791834dacb2ef1814c195a85912941d8a28123e 100644 (file)
                };
        };
 
-       gpu_cm: clock-controller@1500 {
+       gpu_cm: gpu_cm@1500 {
                compatible = "ti,omap4-cm";
                reg = <0x1500 0x100>;
                #address-cells = <1>;
index a53657b8328817c6970100215338267ea94b463c..bda454d1215078411f1e1e3898ce7d2cae6190f8 100644 (file)
@@ -8,6 +8,7 @@
 #include <dt-bindings/mfd/dbx500-prcmu.h>
 #include <dt-bindings/arm/ux500_pm_domains.h>
 #include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/thermal/thermal.h>
 
 / {
        #address-cells = <1>;
                 * cooling.
                 */
                cpu_thermal: cpu-thermal {
-                       polling-delay-passive = <0>;
-                       polling-delay = <1000>;
+                       polling-delay-passive = <250>;
+                       /*
+                        * This sensor fires interrupts to update the thermal
+                        * zone, so no polling is needed.
+                        */
+                       polling-delay = <0>;
 
                        thermal-sensors = <&thermal>;
 
@@ -79,7 +84,7 @@
 
                        cooling-maps {
                                trip = <&cpu_alert>;
-                               cooling-device = <&CPU0 0 2>;
+                               cooling-device = <&CPU0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
                                contribution = <100>;
                        };
                };
index e4a0d51ec3a8cde7003099dcc29f008f82704e74..0a3a7d66737b60dd4f3ff991e5ffc68629660e4b 100644 (file)
                                                 <STM32_PINMUX('F', 6, AF9)>; /* QSPI_BK1_IO3 */
                                        bias-disable;
                                        drive-push-pull;
-                                       slew-rate = <3>;
+                                       slew-rate = <1>;
                                };
                                pins2 {
                                        pinmux = <STM32_PINMUX('B', 6, AF10)>; /* QSPI_BK1_NCS */
                                        bias-pull-up;
                                        drive-push-pull;
-                                       slew-rate = <3>;
+                                       slew-rate = <1>;
                                };
                        };
 
                                                 <STM32_PINMUX('G', 7, AF11)>; /* QSPI_BK2_IO3 */
                                        bias-disable;
                                        drive-push-pull;
-                                       slew-rate = <3>;
+                                       slew-rate = <1>;
                                };
                                pins2 {
                                        pinmux = <STM32_PINMUX('C', 0, AF10)>; /* QSPI_BK2_NCS */
                                        bias-pull-up;
                                        drive-push-pull;
-                                       slew-rate = <3>;
+                                       slew-rate = <1>;
                                };
                        };
 
index ce823c44e98affebfe799183e92f56f43aea1fab..4c268b70b73571e1ec6a79a8093217aa25342b89 100644 (file)
                        interrupts = <39>;
                        clocks = <&ccu CLK_AHB_EHCI0>;
                        phys = <&usbphy 1>;
+                       phy-names = "usb";
                        status = "disabled";
                };
 
                        interrupts = <64>;
                        clocks = <&ccu CLK_USB_OHCI0>, <&ccu CLK_AHB_OHCI0>;
                        phys = <&usbphy 1>;
+                       phy-names = "usb";
                        status = "disabled";
                };
 
                        interrupts = <40>;
                        clocks = <&ccu CLK_AHB_EHCI1>;
                        phys = <&usbphy 2>;
+                       phy-names = "usb";
                        status = "disabled";
                };
 
                        interrupts = <65>;
                        clocks = <&ccu CLK_USB_OHCI1>, <&ccu CLK_AHB_OHCI1>;
                        phys = <&usbphy 2>;
+                       phy-names = "usb";
                        status = "disabled";
                };
 
index cfb1efc8828c34de86917efff63a762b32b25985..6befa236ba99d43b1f97a1d8df0cc64391c20cc7 100644 (file)
                        interrupts = <39>;
                        clocks = <&ccu CLK_AHB_EHCI>;
                        phys = <&usbphy 1>;
+                       phy-names = "usb";
                        status = "disabled";
                };
 
                        interrupts = <40>;
                        clocks = <&ccu CLK_USB_OHCI>, <&ccu CLK_AHB_OHCI>;
                        phys = <&usbphy 1>;
+                       phy-names = "usb";
                        status = "disabled";
                };
 
index bbeb743633c68ef6f19a1b1c5375eb5e821df88b..ac7638078420682986ca626fbf5d827bd1c8221d 100644 (file)
                        clocks = <&ccu CLK_AHB1_EHCI0>;
                        resets = <&ccu RST_AHB1_EHCI0>;
                        phys = <&usbphy 1>;
+                       phy-names = "usb";
                        status = "disabled";
                };
 
                        clocks = <&ccu CLK_AHB1_OHCI0>, <&ccu CLK_USB_OHCI0>;
                        resets = <&ccu RST_AHB1_OHCI0>;
                        phys = <&usbphy 1>;
+                       phy-names = "usb";
                        status = "disabled";
                };
 
                        clocks = <&ccu CLK_AHB1_EHCI1>;
                        resets = <&ccu RST_AHB1_EHCI1>;
                        phys = <&usbphy 2>;
+                       phy-names = "usb";
                        status = "disabled";
                };
 
                        clocks = <&ccu CLK_AHB1_OHCI1>, <&ccu CLK_USB_OHCI1>;
                        resets = <&ccu RST_AHB1_OHCI1>;
                        phys = <&usbphy 2>;
+                       phy-names = "usb";
                        status = "disabled";
                };
 
index 49380de754a933b22a56b37a69c033f9a019e6b7..8aebefd6accfee08a67dd7a12ecb41a7576f470d 100644 (file)
                        compatible = "allwinner,sun7i-a20-csi0";
                        reg = <0x01c09000 0x1000>;
                        interrupts = <GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>;
-                       clocks = <&ccu CLK_AHB_CSI0>, <&ccu CLK_CSI0>,
-                                <&ccu CLK_CSI_SCLK>, <&ccu CLK_DRAM_CSI0>;
-                       clock-names = "bus", "mod", "isp", "ram";
+                       clocks = <&ccu CLK_AHB_CSI0>, <&ccu CLK_CSI_SCLK>, <&ccu CLK_DRAM_CSI0>;
+                       clock-names = "bus", "isp", "ram";
                        resets = <&ccu RST_CSI0>;
                        status = "disabled";
                };
                        interrupts = <GIC_SPI 39 IRQ_TYPE_LEVEL_HIGH>;
                        clocks = <&ccu CLK_AHB_EHCI0>;
                        phys = <&usbphy 1>;
+                       phy-names = "usb";
                        status = "disabled";
                };
 
                        interrupts = <GIC_SPI 64 IRQ_TYPE_LEVEL_HIGH>;
                        clocks = <&ccu CLK_USB_OHCI0>, <&ccu CLK_AHB_OHCI0>;
                        phys = <&usbphy 1>;
+                       phy-names = "usb";
                        status = "disabled";
                };
 
                        interrupts = <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>;
                        clocks = <&ccu CLK_AHB_EHCI1>;
                        phys = <&usbphy 2>;
+                       phy-names = "usb";
                        status = "disabled";
                };
 
                        interrupts = <GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>;
                        clocks = <&ccu CLK_USB_OHCI1>, <&ccu CLK_AHB_OHCI1>;
                        phys = <&usbphy 2>;
+                       phy-names = "usb";
                        status = "disabled";
                };
 
index 52eed0ae3607666a86ec6865902d115168c0aa94..f292f96ab39b009bbad1a51d58b725d37bbe9bd2 100644 (file)
                        clocks = <&ccu CLK_BUS_EHCI>;
                        resets = <&ccu RST_BUS_EHCI>;
                        phys = <&usbphy 1>;
+                       phy-names = "usb";
                        status = "disabled";
                };
 
                        clocks = <&ccu CLK_BUS_OHCI>, <&ccu CLK_USB_OHCI>;
                        resets = <&ccu RST_BUS_OHCI>;
                        phys = <&usbphy 1>;
+                       phy-names = "usb";
                        status = "disabled";
                };
 
index 523be6611c50dcdea77bcba5a1266f34972bea58..74bb053cf23c62434641064da2424bf9f56637c3 100644 (file)
                        clocks = <&ccu CLK_BUS_EHCI0>;
                        resets = <&ccu RST_BUS_EHCI0>;
                        phys = <&usbphy 1>;
+                       phy-names = "usb";
                        status = "disabled";
                };
 
                        clocks = <&ccu CLK_BUS_OHCI0>, <&ccu CLK_USB_OHCI0>;
                        resets = <&ccu RST_BUS_OHCI0>;
                        phys = <&usbphy 1>;
+                       phy-names = "usb";
                        status = "disabled";
                };
 
                        clocks = <&ccu CLK_BUS_EHCI1>;
                        resets = <&ccu RST_BUS_EHCI1>;
                        phys = <&usbphy 2>;
+                       phy-names = "usb";
                        status = "disabled";
                };
 
index bde068111b859084f482439ecf959156a8a30415..c9c2688db66d9e16f4a31e576712873e20eb1c63 100644 (file)
                        clocks = <&ccu CLK_BUS_EHCI1>;
                        resets = <&ccu RST_BUS_EHCI1>;
                        phys = <&usbphy 1>;
+                       phy-names = "usb";
                        status = "disabled";
                };
 
                                 <&ccu CLK_USB_OHCI1>;
                        resets = <&ccu RST_BUS_OHCI1>;
                        phys = <&usbphy 1>;
+                       phy-names = "usb";
                        status = "disabled";
                };
 
                        clocks = <&ccu CLK_BUS_EHCI2>;
                        resets = <&ccu RST_BUS_EHCI2>;
                        phys = <&usbphy 2>;
+                       phy-names = "usb";
                        status = "disabled";
                };
 
                                 <&ccu CLK_USB_OHCI2>;
                        resets = <&ccu RST_BUS_OHCI2>;
                        phys = <&usbphy 2>;
+                       phy-names = "usb";
                        status = "disabled";
                };
 
index c34d505c7efeaecb95cab277cb24e601441321ff..b9b6fb00be285f46c16f7823e716b765a752f79f 100644 (file)
                        clocks = <&usb_clocks CLK_BUS_HCI0>;
                        resets = <&usb_clocks RST_USB0_HCI>;
                        phys = <&usbphy1>;
+                       phy-names = "usb";
                        status = "disabled";
                };
 
                                 <&usb_clocks CLK_USB_OHCI0>;
                        resets = <&usb_clocks RST_USB0_HCI>;
                        phys = <&usbphy1>;
+                       phy-names = "usb";
                        status = "disabled";
                };
 
                        clocks = <&usb_clocks CLK_BUS_HCI1>;
                        resets = <&usb_clocks RST_USB1_HCI>;
                        phys = <&usbphy2>;
+                       phy-names = "usb";
                        status = "disabled";
                };
 
                        clocks = <&usb_clocks CLK_BUS_HCI2>;
                        resets = <&usb_clocks RST_USB2_HCI>;
                        phys = <&usbphy3>;
+                       phy-names = "usb";
                        status = "disabled";
                };
 
                                 <&usb_clocks CLK_USB_OHCI2>;
                        resets = <&usb_clocks RST_USB2_HCI>;
                        phys = <&usbphy3>;
+                       phy-names = "usb";
                        status = "disabled";
                };
 
index eba190b3f9defdefa5a72558b73aa3a3b9090546..107eeafad20a151a0488524fb936714d0dbfa6da 100644 (file)
                        clocks = <&ccu CLK_BUS_EHCI1>, <&ccu CLK_BUS_OHCI1>;
                        resets = <&ccu RST_BUS_EHCI1>, <&ccu RST_BUS_OHCI1>;
                        phys = <&usbphy 1>;
+                       phy-names = "usb";
                        status = "disabled";
                };
 
                                 <&ccu CLK_USB_OHCI1>;
                        resets = <&ccu RST_BUS_EHCI1>, <&ccu RST_BUS_OHCI1>;
                        phys = <&usbphy 1>;
+                       phy-names = "usb";
                        status = "disabled";
                };
 
                        clocks = <&ccu CLK_BUS_EHCI2>, <&ccu CLK_BUS_OHCI2>;
                        resets = <&ccu RST_BUS_EHCI2>, <&ccu RST_BUS_OHCI2>;
                        phys = <&usbphy 2>;
+                       phy-names = "usb";
                        status = "disabled";
                };
 
                                 <&ccu CLK_USB_OHCI2>;
                        resets = <&ccu RST_BUS_EHCI2>, <&ccu RST_BUS_OHCI2>;
                        phys = <&usbphy 2>;
+                       phy-names = "usb";
                        status = "disabled";
                };
 
                        clocks = <&ccu CLK_BUS_EHCI3>, <&ccu CLK_BUS_OHCI3>;
                        resets = <&ccu RST_BUS_EHCI3>, <&ccu RST_BUS_OHCI3>;
                        phys = <&usbphy 3>;
+                       phy-names = "usb";
                        status = "disabled";
                };
 
                                 <&ccu CLK_USB_OHCI3>;
                        resets = <&ccu RST_BUS_EHCI3>, <&ccu RST_BUS_OHCI3>;
                        phys = <&usbphy 3>;
+                       phy-names = "usb";
                        status = "disabled";
                };
 
index dc8a5f37a1ef2a6029bfcbd1d77e46d4b1936a7c..c8ebb23c4e02b03c2401db216d990ec3d63dd3d7 100644 (file)
                #address-cells = <1>;
                #size-cells = <0>;
                reg = <0x70>;
+               i2c-mux-idle-disconnect;
 
                sff0_i2c: i2c@1 {
                        #address-cells = <1>;
                reg = <0x71>;
                #address-cells = <1>;
                #size-cells = <0>;
+               i2c-mux-idle-disconnect;
 
                sff5_i2c: i2c@1 {
                        #address-cells = <1>;
index 5ae5b5228467adb793e4a847dbbec838c4537300..ef484c4cfd1a252f30aba80e459beacf01ecb199 100644 (file)
@@ -91,7 +91,6 @@ CONFIG_USB_SERIAL_PL2303=m
 CONFIG_USB_SERIAL_CYBERJACK=m
 CONFIG_USB_SERIAL_XIRCOM=m
 CONFIG_USB_SERIAL_OMNINET=m
-CONFIG_USB_RIO500=m
 CONFIG_EXT2_FS=m
 CONFIG_EXT3_FS=m
 CONFIG_MSDOS_FS=y
index e4f6442588e716b14832b2712f9f246e73163ba9..4fec2ec379adc36f417e22d8d9e844bf83cd0a6b 100644 (file)
@@ -195,7 +195,6 @@ CONFIG_USB_SERIAL_XIRCOM=m
 CONFIG_USB_SERIAL_OMNINET=m
 CONFIG_USB_EMI62=m
 CONFIG_USB_EMI26=m
-CONFIG_USB_RIO500=m
 CONFIG_USB_LEGOTOWER=m
 CONFIG_USB_LCD=m
 CONFIG_USB_CYTHERM=m
index b34970ce6b314b669ebfb7910d0cd224988ed0ef..231f8973bbb2d8ca242eef675b2c4fbed6ba554b 100644 (file)
@@ -167,6 +167,7 @@ CONFIG_FB=y
 CONFIG_FIRMWARE_EDID=y
 CONFIG_FB_DA8XX=y
 CONFIG_BACKLIGHT_PWM=m
+CONFIG_BACKLIGHT_GPIO=m
 CONFIG_FRAMEBUFFER_CONSOLE=y
 CONFIG_LOGO=y
 CONFIG_SOUND=m
@@ -228,7 +229,7 @@ CONFIG_RTC_DRV_OMAP=m
 CONFIG_DMADEVICES=y
 CONFIG_TI_EDMA=y
 CONFIG_COMMON_CLK_PWM=m
-CONFIG_REMOTEPROC=m
+CONFIG_REMOTEPROC=y
 CONFIG_DA8XX_REMOTEPROC=m
 CONFIG_MEMORY=y
 CONFIG_TI_AEMIF=m
index 9bfffbe22d53cd9732b98415477aa61bd97c430e..0f7381ee0c375ff16db8ab7efe57d12dc9b88cd7 100644 (file)
@@ -276,6 +276,7 @@ CONFIG_VIDEO_OV5640=m
 CONFIG_VIDEO_OV5645=m
 CONFIG_IMX_IPUV3_CORE=y
 CONFIG_DRM=y
+CONFIG_DRM_MSM=y
 CONFIG_DRM_PANEL_LVDS=y
 CONFIG_DRM_PANEL_SIMPLE=y
 CONFIG_DRM_PANEL_SEIKO_43WVF1G=y
index 13ba532869017d3c969b005bfcf5948a1fbc4baf..e4c8def9a0a578d75e93e370b153cfe70414b983 100644 (file)
@@ -415,7 +415,7 @@ CONFIG_SPI_SH_MSIOF=m
 CONFIG_SPI_SH_HSPI=y
 CONFIG_SPI_SIRF=y
 CONFIG_SPI_STM32=m
-CONFIG_SPI_STM32_QSPI=m
+CONFIG_SPI_STM32_QSPI=y
 CONFIG_SPI_SUN4I=y
 CONFIG_SPI_SUN6I=y
 CONFIG_SPI_TEGRA114=y
@@ -933,7 +933,7 @@ CONFIG_BCM2835_MBOX=y
 CONFIG_ROCKCHIP_IOMMU=y
 CONFIG_TEGRA_IOMMU_GART=y
 CONFIG_TEGRA_IOMMU_SMMU=y
-CONFIG_REMOTEPROC=m
+CONFIG_REMOTEPROC=y
 CONFIG_ST_REMOTEPROC=m
 CONFIG_RPMSG_VIRTIO=m
 CONFIG_ASPEED_LPC_CTRL=m
index 64eb896907bfa8ee2473cb8d32d6825630d699a9..40d7f1a4fc458b5b9b777cfaee99a995f2bddfac 100644 (file)
@@ -356,14 +356,15 @@ CONFIG_DRM_OMAP_CONNECTOR_HDMI=m
 CONFIG_DRM_OMAP_CONNECTOR_ANALOG_TV=m
 CONFIG_DRM_OMAP_PANEL_DPI=m
 CONFIG_DRM_OMAP_PANEL_DSI_CM=m
-CONFIG_DRM_OMAP_PANEL_SONY_ACX565AKM=m
-CONFIG_DRM_OMAP_PANEL_LGPHILIPS_LB035Q02=m
-CONFIG_DRM_OMAP_PANEL_SHARP_LS037V7DW01=m
-CONFIG_DRM_OMAP_PANEL_TPO_TD028TTEC1=m
-CONFIG_DRM_OMAP_PANEL_TPO_TD043MTEA1=m
-CONFIG_DRM_OMAP_PANEL_NEC_NL8048HL11=m
 CONFIG_DRM_TILCDC=m
 CONFIG_DRM_PANEL_SIMPLE=m
+CONFIG_DRM_TI_TFP410=m
+CONFIG_DRM_PANEL_LG_LB035Q02=m
+CONFIG_DRM_PANEL_NEC_NL8048HL11=m
+CONFIG_DRM_PANEL_SHARP_LS037V7DW01=m
+CONFIG_DRM_PANEL_SONY_ACX565AKM=m
+CONFIG_DRM_PANEL_TPO_TD028TTEC1=m
+CONFIG_DRM_PANEL_TPO_TD043MTEA1=m
 CONFIG_FB=y
 CONFIG_FIRMWARE_EDID=y
 CONFIG_FB_MODE_HELPERS=y
@@ -423,6 +424,7 @@ CONFIG_USB_SERIAL_GENERIC=y
 CONFIG_USB_SERIAL_SIMPLE=m
 CONFIG_USB_SERIAL_FTDI_SIO=m
 CONFIG_USB_SERIAL_PL2303=m
+CONFIG_USB_SERIAL_OPTION=m
 CONFIG_USB_TEST=m
 CONFIG_NOP_USB_XCEIV=m
 CONFIG_AM335X_PHY_USB=m
@@ -460,6 +462,7 @@ CONFIG_MMC_SDHCI_OMAP=y
 CONFIG_NEW_LEDS=y
 CONFIG_LEDS_CLASS=m
 CONFIG_LEDS_CPCAP=m
+CONFIG_LEDS_LM3532=m
 CONFIG_LEDS_GPIO=m
 CONFIG_LEDS_PCA963X=m
 CONFIG_LEDS_PWM=m
@@ -481,7 +484,7 @@ CONFIG_RTC_DRV_OMAP=m
 CONFIG_RTC_DRV_CPCAP=m
 CONFIG_DMADEVICES=y
 CONFIG_OMAP_IOMMU=y
-CONFIG_REMOTEPROC=m
+CONFIG_REMOTEPROC=y
 CONFIG_OMAP_REMOTEPROC=m
 CONFIG_WKUP_M3_RPROC=m
 CONFIG_SOC_TI=y
index 787c3f9be4142c88f5881f6c37116a306901bfa0..b817c57f05f1c2b1a9640e02228ebe36e09d7ffe 100644 (file)
@@ -581,7 +581,6 @@ CONFIG_USB_SERIAL_XIRCOM=m
 CONFIG_USB_SERIAL_OMNINET=m
 CONFIG_USB_EMI62=m
 CONFIG_USB_EMI26=m
-CONFIG_USB_RIO500=m
 CONFIG_USB_LEGOTOWER=m
 CONFIG_USB_LCD=m
 CONFIG_USB_CYTHERM=m
index 95b5a4ffddeabba1ece89ae9a70f2ab38c049e64..73ed73a8785a04b6094503c9e7b8e7dc1ecd6896 100644 (file)
@@ -327,7 +327,6 @@ CONFIG_USB_EMI62=m
 CONFIG_USB_EMI26=m
 CONFIG_USB_ADUTUX=m
 CONFIG_USB_SEVSEG=m
-CONFIG_USB_RIO500=m
 CONFIG_USB_LEGOTOWER=m
 CONFIG_USB_LCD=m
 CONFIG_USB_CYPRESS_CY7C63=m
index 4fb51d665abb888b172886bf6eb6525ae9619e90..a1cdbfa064c5f5376451d17b89ca752b90baad90 100644 (file)
@@ -189,7 +189,6 @@ CONFIG_USB_SERIAL_XIRCOM=m
 CONFIG_USB_SERIAL_OMNINET=m
 CONFIG_USB_EMI62=m
 CONFIG_USB_EMI26=m
-CONFIG_USB_RIO500=m
 CONFIG_USB_LEGOTOWER=m
 CONFIG_USB_LCD=m
 CONFIG_USB_CYTHERM=m
index b24df84a1d7adceaa63880578af491e58a2b7cc5..043b0b18bf7e00a38ebd03ea4e262f2651edfdfd 100644 (file)
@@ -98,6 +98,7 @@ config CRYPTO_AES_ARM_CE
        tristate "Accelerated AES using ARMv8 Crypto Extensions"
        depends on KERNEL_MODE_NEON
        select CRYPTO_BLKCIPHER
+       select CRYPTO_LIB_AES
        select CRYPTO_SIMD
        help
          Use an implementation of AES in CBC, CTR and XTS modes that uses
index b978cdf133af60f6f49675dda9b565dc04e47e32..4d1707388d94194b253741e4021fc22fd8c44506 100644 (file)
@@ -9,6 +9,7 @@
 #include <asm/assembler.h>
 
        .text
+       .arch           armv8-a
        .fpu            crypto-neon-fp-armv8
        .align          3
 
index 567dbede4785ce96b89718cc9f1725150d13d4eb..f1d0a7807cd0e13beca85a00ea9a34ef504b104a 100644 (file)
@@ -82,7 +82,7 @@
 #ifndef __ASSEMBLY__
 
 #ifdef CONFIG_CPU_CP15_MMU
-static inline unsigned int get_domain(void)
+static __always_inline unsigned int get_domain(void)
 {
        unsigned int domain;
 
@@ -94,7 +94,7 @@ static inline unsigned int get_domain(void)
        return domain;
 }
 
-static inline void set_domain(unsigned val)
+static __always_inline void set_domain(unsigned int val)
 {
        asm volatile(
        "mcr    p15, 0, %0, c3, c0      @ set domain"
@@ -102,12 +102,12 @@ static inline void set_domain(unsigned val)
        isb();
 }
 #else
-static inline unsigned int get_domain(void)
+static __always_inline unsigned int get_domain(void)
 {
        return 0;
 }
 
-static inline void set_domain(unsigned val)
+static __always_inline void set_domain(unsigned int val)
 {
 }
 #endif
index 303248e5b990f5ae4698432050aa53a6f0014340..98c6b91be4a8addd8212a3f170927e259ffb0e0d 100644 (file)
@@ -22,7 +22,7 @@
  * perform such accesses (e.g., via list poison values) which could then
  * be exploited for privilege escalation.
  */
-static inline unsigned int uaccess_save_and_enable(void)
+static __always_inline unsigned int uaccess_save_and_enable(void)
 {
 #ifdef CONFIG_CPU_SW_DOMAIN_PAN
        unsigned int old_domain = get_domain();
@@ -37,7 +37,7 @@ static inline unsigned int uaccess_save_and_enable(void)
 #endif
 }
 
-static inline void uaccess_restore(unsigned int flags)
+static __always_inline void uaccess_restore(unsigned int flags)
 {
 #ifdef CONFIG_CPU_SW_DOMAIN_PAN
        /* Restore the user access mask */
diff --git a/arch/arm/include/asm/xen/xen-ops.h b/arch/arm/include/asm/xen/xen-ops.h
deleted file mode 100644 (file)
index ec154e7..0000000
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _ASM_XEN_OPS_H
-#define _ASM_XEN_OPS_H
-
-void xen_efi_runtime_setup(void);
-
-#endif /* _ASM_XEN_OPS_H */
index a7810be07da1c56c1502a72b60d7bc3da8075d91..4a3982812a401f1259909df4e1d05ead3f29dd9c 100644 (file)
@@ -68,7 +68,7 @@ ENDPROC(__vet_atags)
  * The following fragment of code is executed with the MMU on in MMU mode,
  * and uses absolute addresses; this is not position independent.
  *
- *  r0  = cp#15 control register
+ *  r0  = cp#15 control register (exc_ret for M-class)
  *  r1  = machine ID
  *  r2  = atags/dtb pointer
  *  r9  = processor ID
@@ -137,7 +137,8 @@ __mmap_switched_data:
 #ifdef CONFIG_CPU_CP15
        .long   cr_alignment                    @ r3
 #else
-       .long   0                               @ r3
+M_CLASS(.long  exc_ret)                        @ r3
+AR_CLASS(.long 0)                              @ r3
 #endif
        .size   __mmap_switched_data, . - __mmap_switched_data
 
index afa350f44dea3308609754e279e040c10c711f95..0fc814bbc34b171e43e55a57b081220b15fe70a2 100644 (file)
@@ -201,6 +201,8 @@ M_CLASS(streq       r3, [r12, #PMSAv8_MAIR1])
        bic     r0, r0, #V7M_SCB_CCR_IC
 #endif
        str     r0, [r12, V7M_SCB_CCR]
+       /* Pass exc_ret to __mmap_switched */
+       mov     r0, r10
 #endif /* CONFIG_CPU_CP15 elif CONFIG_CPU_V7M */
        ret     lr
 ENDPROC(__after_proc_init)
index 8062412be70f06dc8d252ca45f9f5ff86bba262e..9fc5c73cc0be2d0644036eaa9ed15b9083af7ea5 100644 (file)
@@ -462,8 +462,8 @@ static s8 dm365_queue_priority_mapping[][2] = {
 };
 
 static const struct dma_slave_map dm365_edma_map[] = {
-       { "davinci-mcbsp.0", "tx", EDMA_FILTER_PARAM(0, 2) },
-       { "davinci-mcbsp.0", "rx", EDMA_FILTER_PARAM(0, 3) },
+       { "davinci-mcbsp", "tx", EDMA_FILTER_PARAM(0, 2) },
+       { "davinci-mcbsp", "rx", EDMA_FILTER_PARAM(0, 3) },
        { "davinci_voicecodec", "tx", EDMA_FILTER_PARAM(0, 2) },
        { "davinci_voicecodec", "rx", EDMA_FILTER_PARAM(0, 3) },
        { "spi_davinci.2", "tx", EDMA_FILTER_PARAM(0, 10) },
index dd939e1325c64a4d1496c02465b18f225bd22b9b..29fd13684a68afd37e6cb6b9e6d6201ffa187cd8 100644 (file)
@@ -763,7 +763,8 @@ static struct omap_hwmod_class_sysconfig am33xx_timer_sysc = {
        .rev_offs       = 0x0000,
        .sysc_offs      = 0x0010,
        .syss_offs      = 0x0014,
-       .sysc_flags     = (SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET),
+       .sysc_flags     = SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET |
+                         SYSC_HAS_RESET_STATUS,
        .idlemodes      = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
                          SIDLE_SMART_WKUP),
        .sysc_fields    = &omap_hwmod_sysc_type2,
index 2bcb6345b8735bc75accd0b82f4c3d2e63919570..54524775f2782ddd3998eb51e9e90123106557ac 100644 (file)
@@ -231,8 +231,9 @@ static struct omap_hwmod am33xx_control_hwmod = {
 static struct omap_hwmod_class_sysconfig lcdc_sysc = {
        .rev_offs       = 0x0,
        .sysc_offs      = 0x54,
-       .sysc_flags     = (SYSC_HAS_SIDLEMODE | SYSC_HAS_MIDLEMODE),
-       .idlemodes      = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
+       .sysc_flags     = SYSC_HAS_SIDLEMODE | SYSC_HAS_MIDLEMODE,
+       .idlemodes      = SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
+                         MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART,
        .sysc_fields    = &omap_hwmod_sysc_type2,
 };
 
index d942a335709076fe743935e3331d65388d270707..2efd18e8824c78e597eb28aa2332d85193660167 100644 (file)
@@ -89,6 +89,13 @@ static struct iommu_platform_data omap3_iommu_pdata = {
        .reset_name = "mmu",
        .assert_reset = omap_device_assert_hardreset,
        .deassert_reset = omap_device_deassert_hardreset,
+       .device_enable = omap_device_enable,
+       .device_idle = omap_device_idle,
+};
+
+static struct iommu_platform_data omap3_iommu_isp_pdata = {
+       .device_enable = omap_device_enable,
+       .device_idle = omap_device_idle,
 };
 
 static int omap3_sbc_t3730_twl_callback(struct device *dev,
@@ -424,6 +431,8 @@ static struct iommu_platform_data omap4_iommu_pdata = {
        .reset_name = "mmu_cache",
        .assert_reset = omap_device_assert_hardreset,
        .deassert_reset = omap_device_deassert_hardreset,
+       .device_enable = omap_device_enable,
+       .device_idle = omap_device_idle,
 };
 #endif
 
@@ -617,6 +626,8 @@ static struct of_dev_auxdata omap_auxdata_lookup[] = {
 #ifdef CONFIG_ARCH_OMAP3
        OF_DEV_AUXDATA("ti,omap2-iommu", 0x5d000000, "5d000000.mmu",
                       &omap3_iommu_pdata),
+       OF_DEV_AUXDATA("ti,omap2-iommu", 0x480bd400, "480bd400.mmu",
+                      &omap3_iommu_isp_pdata),
        OF_DEV_AUXDATA("ti,omap3-smartreflex-core", 0x480cb000,
                       "480cb000.smartreflex", &omap_sr_pdata[OMAP_SR_CORE]),
        OF_DEV_AUXDATA("ti,omap3-smartreflex-mpu-iva", 0x480c9000,
index 1fde1bf53fb65b4092f727f7e5fe246f49f02b5c..7ac9af56762df6f5943523aed760afc2fd83cd29 100644 (file)
@@ -74,83 +74,6 @@ int omap_pm_clkdms_setup(struct clockdomain *clkdm, void *unused)
        return 0;
 }
 
-/*
- * This API is to be called during init to set the various voltage
- * domains to the voltage as per the opp table. Typically we boot up
- * at the nominal voltage. So this function finds out the rate of
- * the clock associated with the voltage domain, finds out the correct
- * opp entry and sets the voltage domain to the voltage specified
- * in the opp entry
- */
-static int __init omap2_set_init_voltage(char *vdd_name, char *clk_name,
-                                        const char *oh_name)
-{
-       struct voltagedomain *voltdm;
-       struct clk *clk;
-       struct dev_pm_opp *opp;
-       unsigned long freq, bootup_volt;
-       struct device *dev;
-
-       if (!vdd_name || !clk_name || !oh_name) {
-               pr_err("%s: invalid parameters\n", __func__);
-               goto exit;
-       }
-
-       if (!strncmp(oh_name, "mpu", 3))
-               /* 
-                * All current OMAPs share voltage rail and clock
-                * source, so CPU0 is used to represent the MPU-SS.
-                */
-               dev = get_cpu_device(0);
-       else
-               dev = omap_device_get_by_hwmod_name(oh_name);
-
-       if (IS_ERR(dev)) {
-               pr_err("%s: Unable to get dev pointer for hwmod %s\n",
-                       __func__, oh_name);
-               goto exit;
-       }
-
-       voltdm = voltdm_lookup(vdd_name);
-       if (!voltdm) {
-               pr_err("%s: unable to get vdd pointer for vdd_%s\n",
-                       __func__, vdd_name);
-               goto exit;
-       }
-
-       clk =  clk_get(NULL, clk_name);
-       if (IS_ERR(clk)) {
-               pr_err("%s: unable to get clk %s\n", __func__, clk_name);
-               goto exit;
-       }
-
-       freq = clk_get_rate(clk);
-       clk_put(clk);
-
-       opp = dev_pm_opp_find_freq_ceil(dev, &freq);
-       if (IS_ERR(opp)) {
-               pr_err("%s: unable to find boot up OPP for vdd_%s\n",
-                       __func__, vdd_name);
-               goto exit;
-       }
-
-       bootup_volt = dev_pm_opp_get_voltage(opp);
-       dev_pm_opp_put(opp);
-
-       if (!bootup_volt) {
-               pr_err("%s: unable to find voltage corresponding to the bootup OPP for vdd_%s\n",
-                      __func__, vdd_name);
-               goto exit;
-       }
-
-       voltdm_scale(voltdm, bootup_volt);
-       return 0;
-
-exit:
-       pr_err("%s: unable to set vdd_%s\n", __func__, vdd_name);
-       return -EINVAL;
-}
-
 #ifdef CONFIG_SUSPEND
 static int omap_pm_enter(suspend_state_t suspend_state)
 {
@@ -208,25 +131,6 @@ void omap_common_suspend_init(void *pm_suspend)
 }
 #endif /* CONFIG_SUSPEND */
 
-static void __init omap3_init_voltages(void)
-{
-       if (!soc_is_omap34xx())
-               return;
-
-       omap2_set_init_voltage("mpu_iva", "dpll1_ck", "mpu");
-       omap2_set_init_voltage("core", "l3_ick", "l3_main");
-}
-
-static void __init omap4_init_voltages(void)
-{
-       if (!soc_is_omap44xx())
-               return;
-
-       omap2_set_init_voltage("mpu", "dpll_mpu_ck", "mpu");
-       omap2_set_init_voltage("core", "l3_div_ck", "l3_main_1");
-       omap2_set_init_voltage("iva", "dpll_iva_m5x2_ck", "iva");
-}
-
 int __maybe_unused omap_pm_nop_init(void)
 {
        return 0;
@@ -246,10 +150,6 @@ int __init omap2_common_pm_late_init(void)
        omap4_twl_init();
        omap_voltage_late_init();
 
-       /* Initialize the voltages */
-       omap3_init_voltages();
-       omap4_init_voltages();
-
        /* Smartreflex device init */
        omap_devinit_smartreflex();
 
index 04b36436cbc04877a060b10670bf453dd2ed0e2d..788c5cf46de593a8e53265c7de3842496fe21dc1 100644 (file)
@@ -324,7 +324,7 @@ union offset_union {
        __put32_unaligned_check("strbt", val, addr)
 
 static void
-do_alignment_finish_ldst(unsigned long addr, unsigned long instr, struct pt_regs *regs, union offset_union offset)
+do_alignment_finish_ldst(unsigned long addr, u32 instr, struct pt_regs *regs, union offset_union offset)
 {
        if (!LDST_U_BIT(instr))
                offset.un = -offset.un;
@@ -337,7 +337,7 @@ do_alignment_finish_ldst(unsigned long addr, unsigned long instr, struct pt_regs
 }
 
 static int
-do_alignment_ldrhstrh(unsigned long addr, unsigned long instr, struct pt_regs *regs)
+do_alignment_ldrhstrh(unsigned long addr, u32 instr, struct pt_regs *regs)
 {
        unsigned int rd = RD_BITS(instr);
 
@@ -386,8 +386,7 @@ do_alignment_ldrhstrh(unsigned long addr, unsigned long instr, struct pt_regs *r
 }
 
 static int
-do_alignment_ldrdstrd(unsigned long addr, unsigned long instr,
-                     struct pt_regs *regs)
+do_alignment_ldrdstrd(unsigned long addr, u32 instr, struct pt_regs *regs)
 {
        unsigned int rd = RD_BITS(instr);
        unsigned int rd2;
@@ -449,7 +448,7 @@ do_alignment_ldrdstrd(unsigned long addr, unsigned long instr,
 }
 
 static int
-do_alignment_ldrstr(unsigned long addr, unsigned long instr, struct pt_regs *regs)
+do_alignment_ldrstr(unsigned long addr, u32 instr, struct pt_regs *regs)
 {
        unsigned int rd = RD_BITS(instr);
 
@@ -498,7 +497,7 @@ do_alignment_ldrstr(unsigned long addr, unsigned long instr, struct pt_regs *reg
  * PU = 10             A                    B
  */
 static int
-do_alignment_ldmstm(unsigned long addr, unsigned long instr, struct pt_regs *regs)
+do_alignment_ldmstm(unsigned long addr, u32 instr, struct pt_regs *regs)
 {
        unsigned int rd, rn, correction, nr_regs, regbits;
        unsigned long eaddr, newaddr;
@@ -539,7 +538,7 @@ do_alignment_ldmstm(unsigned long addr, unsigned long instr, struct pt_regs *reg
         * processor for us.
         */
        if (addr != eaddr) {
-               pr_err("LDMSTM: PC = %08lx, instr = %08lx, "
+               pr_err("LDMSTM: PC = %08lx, instr = %08x, "
                        "addr = %08lx, eaddr = %08lx\n",
                         instruction_pointer(regs), instr, addr, eaddr);
                show_regs(regs);
@@ -716,10 +715,10 @@ thumb2arm(u16 tinstr)
  * 2. Register name Rt from ARMv7 is same as Rd from ARMv6 (Rd is Rt)
  */
 static void *
-do_alignment_t32_to_handler(unsigned long *pinstr, struct pt_regs *regs,
+do_alignment_t32_to_handler(u32 *pinstr, struct pt_regs *regs,
                            union offset_union *poffset)
 {
-       unsigned long instr = *pinstr;
+       u32 instr = *pinstr;
        u16 tinst1 = (instr >> 16) & 0xffff;
        u16 tinst2 = instr & 0xffff;
 
@@ -767,17 +766,48 @@ do_alignment_t32_to_handler(unsigned long *pinstr, struct pt_regs *regs,
        return NULL;
 }
 
+static int alignment_get_arm(struct pt_regs *regs, u32 *ip, u32 *inst)
+{
+       u32 instr = 0;
+       int fault;
+
+       if (user_mode(regs))
+               fault = get_user(instr, ip);
+       else
+               fault = probe_kernel_address(ip, instr);
+
+       *inst = __mem_to_opcode_arm(instr);
+
+       return fault;
+}
+
+static int alignment_get_thumb(struct pt_regs *regs, u16 *ip, u16 *inst)
+{
+       u16 instr = 0;
+       int fault;
+
+       if (user_mode(regs))
+               fault = get_user(instr, ip);
+       else
+               fault = probe_kernel_address(ip, instr);
+
+       *inst = __mem_to_opcode_thumb16(instr);
+
+       return fault;
+}
+
 static int
 do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 {
        union offset_union uninitialized_var(offset);
-       unsigned long instr = 0, instrptr;
-       int (*handler)(unsigned long addr, unsigned long instr, struct pt_regs *regs);
+       unsigned long instrptr;
+       int (*handler)(unsigned long addr, u32 instr, struct pt_regs *regs);
        unsigned int type;
-       unsigned int fault;
+       u32 instr = 0;
        u16 tinstr = 0;
        int isize = 4;
        int thumb2_32b = 0;
+       int fault;
 
        if (interrupts_enabled(regs))
                local_irq_enable();
@@ -786,15 +816,14 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 
        if (thumb_mode(regs)) {
                u16 *ptr = (u16 *)(instrptr & ~1);
-               fault = probe_kernel_address(ptr, tinstr);
-               tinstr = __mem_to_opcode_thumb16(tinstr);
+
+               fault = alignment_get_thumb(regs, ptr, &tinstr);
                if (!fault) {
                        if (cpu_architecture() >= CPU_ARCH_ARMv7 &&
                            IS_T32(tinstr)) {
                                /* Thumb-2 32-bit */
-                               u16 tinst2 = 0;
-                               fault = probe_kernel_address(ptr + 1, tinst2);
-                               tinst2 = __mem_to_opcode_thumb16(tinst2);
+                               u16 tinst2;
+                               fault = alignment_get_thumb(regs, ptr + 1, &tinst2);
                                instr = __opcode_thumb32_compose(tinstr, tinst2);
                                thumb2_32b = 1;
                        } else {
@@ -803,8 +832,7 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
                        }
                }
        } else {
-               fault = probe_kernel_address((void *)instrptr, instr);
-               instr = __mem_to_opcode_arm(instr);
+               fault = alignment_get_arm(regs, (void *)instrptr, &instr);
        }
 
        if (fault) {
@@ -926,7 +954,7 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
         * Oops, we didn't handle the instruction.
         */
        pr_err("Alignment trap: not handling instruction "
-               "%0*lx at [<%08lx>]\n",
+               "%0*x at [<%08lx>]\n",
                isize << 1,
                isize == 2 ? tinstr : instr, instrptr);
        ai_skipped += 1;
@@ -936,7 +964,7 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
        ai_user += 1;
 
        if (ai_usermode & UM_WARN)
-               printk("Alignment trap: %s (%d) PC=0x%08lx Instr=0x%0*lx "
+               printk("Alignment trap: %s (%d) PC=0x%08lx Instr=0x%0*x "
                       "Address=0x%08lx FSR 0x%03x\n", current->comm,
                        task_pid_nr(current), instrptr,
                        isize << 1,
index 1448f144e7fb9c441902e2728f572aa91f405157..1a49d503eafc80b461d256f4f068e9a54c6d85f6 100644 (file)
@@ -132,13 +132,11 @@ __v7m_setup_cont:
        dsb
        mov     r6, lr                  @ save LR
        ldr     sp, =init_thread_union + THREAD_START_SP
-       stmia   sp, {r0-r3, r12}
        cpsie   i
        svc     #0
 1:     cpsid   i
-       ldr     r0, =exc_ret
-       orr     lr, lr, #EXC_RET_THREADMODE_PROCESSSTACK
-       str     lr, [r0]
+       /* Calculate exc_ret */
+       orr     r10, lr, #EXC_RET_THREADMODE_PROCESSSTACK
        ldmia   sp, {r0-r3, r12}
        str     r5, [r12, #11 * 4]      @ restore the original SVC vector entry
        mov     lr, r6                  @ restore LR
index 7ed28982c4c309b5833ab7c03880d6d8b5d60243..c32d04713ba0554d0706509ed7273a0f3f53d2b0 100644 (file)
@@ -1,3 +1,2 @@
 # SPDX-License-Identifier: GPL-2.0-only
 obj-y          := enlighten.o hypercall.o grant-table.o p2m.o mm.o
-obj-$(CONFIG_XEN_EFI) += efi.o
diff --git a/arch/arm/xen/efi.c b/arch/arm/xen/efi.c
deleted file mode 100644 (file)
index d687a73..0000000
+++ /dev/null
@@ -1,28 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (c) 2015, Linaro Limited, Shannon Zhao
- */
-
-#include <linux/efi.h>
-#include <xen/xen-ops.h>
-#include <asm/xen/xen-ops.h>
-
-/* Set XEN EFI runtime services function pointers. Other fields of struct efi,
- * e.g. efi.systab, will be set like normal EFI.
- */
-void __init xen_efi_runtime_setup(void)
-{
-       efi.get_time                 = xen_efi_get_time;
-       efi.set_time                 = xen_efi_set_time;
-       efi.get_wakeup_time          = xen_efi_get_wakeup_time;
-       efi.set_wakeup_time          = xen_efi_set_wakeup_time;
-       efi.get_variable             = xen_efi_get_variable;
-       efi.get_next_variable        = xen_efi_get_next_variable;
-       efi.set_variable             = xen_efi_set_variable;
-       efi.query_variable_info      = xen_efi_query_variable_info;
-       efi.update_capsule           = xen_efi_update_capsule;
-       efi.query_capsule_caps       = xen_efi_query_capsule_caps;
-       efi.get_next_high_mono_count = xen_efi_get_next_high_mono_count;
-       efi.reset_system             = xen_efi_reset_system;
-}
-EXPORT_SYMBOL_GPL(xen_efi_runtime_setup);
index 1e57692552d9db9bd5617b902c0e1b8de3e9cbe4..dd6804a64f1a041935e80919678be6c8f22594e6 100644 (file)
@@ -15,7 +15,6 @@
 #include <xen/xen-ops.h>
 #include <asm/xen/hypervisor.h>
 #include <asm/xen/hypercall.h>
-#include <asm/xen/xen-ops.h>
 #include <asm/system_misc.h>
 #include <asm/efi.h>
 #include <linux/interrupt.h>
@@ -437,7 +436,7 @@ EXPORT_SYMBOL_GPL(HYPERVISOR_memory_op);
 EXPORT_SYMBOL_GPL(HYPERVISOR_physdev_op);
 EXPORT_SYMBOL_GPL(HYPERVISOR_vcpu_op);
 EXPORT_SYMBOL_GPL(HYPERVISOR_tmem_op);
-EXPORT_SYMBOL_GPL(HYPERVISOR_platform_op);
+EXPORT_SYMBOL_GPL(HYPERVISOR_platform_op_raw);
 EXPORT_SYMBOL_GPL(HYPERVISOR_multicall);
 EXPORT_SYMBOL_GPL(HYPERVISOR_vm_assist);
 EXPORT_SYMBOL_GPL(HYPERVISOR_dm_op);
index 2b2c208408bb1025472abcec36a57812d80abf20..38fa917c8585c7e6b6e2efe75d785885f41260a2 100644 (file)
@@ -28,7 +28,10 @@ unsigned long xen_get_swiotlb_free_pages(unsigned int order)
 
        for_each_memblock(memory, reg) {
                if (reg->base < (phys_addr_t)0xffffffff) {
-                       flags |= __GFP_DMA;
+                       if (IS_ENABLED(CONFIG_ZONE_DMA32))
+                               flags |= __GFP_DMA32;
+                       else
+                               flags |= __GFP_DMA;
                        break;
                }
        }
index 41a9b4257b727710bcd4be1e9bcf47e6739a908d..3f047afb982c8d17ad9e9c41e04b35d1fc02cffe 100644 (file)
@@ -110,7 +110,6 @@ config ARM64
        select GENERIC_STRNLEN_USER
        select GENERIC_TIME_VSYSCALL
        select GENERIC_GETTIMEOFDAY
-       select GENERIC_COMPAT_VDSO if (!CPU_BIG_ENDIAN && COMPAT)
        select HANDLE_DOMAIN_IRQ
        select HARDIRQS_SW_RESEND
        select HAVE_PCI
@@ -617,6 +616,23 @@ config CAVIUM_ERRATUM_30115
 
          If unsure, say Y.
 
+config CAVIUM_TX2_ERRATUM_219
+       bool "Cavium ThunderX2 erratum 219: PRFM between TTBR change and ISB fails"
+       default y
+       help
+         On Cavium ThunderX2, a load, store or prefetch instruction between a
+         TTBR update and the corresponding context synchronizing operation can
+         cause a spurious Data Abort to be delivered to any hardware thread in
+         the CPU core.
+
+         Work around the issue by avoiding the problematic code sequence and
+         trapping KVM guest TTBRx_EL1 writes to EL2 when SMT is enabled. The
+         trap handler performs the corresponding register access, skips the
+         instruction and ensures context synchronization by virtue of the
+         exception return.
+
+         If unsure, say Y.
+
 config QCOM_FALKOR_ERRATUM_1003
        bool "Falkor E1003: Incorrect translation due to ASID change"
        default y
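
The CAVIUM_TX2_ERRATUM_219 help text above describes a trap-and-emulate flow. The toy C program below is a purely hypothetical sketch of that idea, compilable in userspace; none of its types or helpers come from the kernel, and the real KVM handler is different. It only models the three steps named in the help text: perform the trapped TTBRx_EL1 write on the guest's behalf, skip the trapped instruction, and rely on the exception return for context synchronization.

/* Hypothetical model only -- not kernel code. */
#include <stdint.h>
#include <stdio.h>

struct toy_vcpu {			/* toy model of guest state */
	uint64_t ttbr0_el1;
	uint64_t ttbr1_el1;
	uint64_t pc;
};

enum toy_sysreg { TOY_TTBR0_EL1, TOY_TTBR1_EL1 };

static void handle_trapped_ttbr_write(struct toy_vcpu *vcpu,
				      enum toy_sysreg reg, uint64_t val)
{
	if (reg == TOY_TTBR0_EL1)	/* perform the access for the guest */
		vcpu->ttbr0_el1 = val;
	else
		vcpu->ttbr1_el1 = val;

	vcpu->pc += 4;			/* skip the trapped MSR instruction */
	/* The eventual return to the guest is the context synchronizing event. */
}

int main(void)
{
	struct toy_vcpu v = { .pc = 0x1000 };

	handle_trapped_ttbr_write(&v, TOY_TTBR0_EL1, 0x40000000);
	printf("ttbr0=%#llx pc=%#llx\n",
	       (unsigned long long)v.ttbr0_el1, (unsigned long long)v.pc);
	return 0;
}
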
@@ -1159,7 +1175,7 @@ menuconfig COMPAT
 if COMPAT
 
 config KUSER_HELPERS
-       bool "Enable kuser helpers page for 32 bit applications"
+       bool "Enable kuser helpers page for 32-bit applications"
        default y
        help
          Warning: disabling this option may break 32-bit user programs.
@@ -1185,6 +1201,18 @@ config KUSER_HELPERS
          Say N here only if you are absolutely certain that you do not
          need these helpers; otherwise, the safe option is to say Y.
 
+config COMPAT_VDSO
+       bool "Enable vDSO for 32-bit applications"
+       depends on !CPU_BIG_ENDIAN && "$(CROSS_COMPILE_COMPAT)" != ""
+       select GENERIC_COMPAT_VDSO
+       default y
+       help
+         Place in the process address space of 32-bit applications an
+         ELF shared object providing fast implementations of gettimeofday
+         and clock_gettime.
+
+         You must have a 32-bit build of glibc 2.22 or later for programs
+         to seamlessly take advantage of this.
 
 menuconfig ARMV8_DEPRECATED
        bool "Emulate deprecated/obsolete ARMv8 instructions"
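
For context on the new COMPAT_VDSO option above: the point of the compat vDSO is that a 32-bit program's clock_gettime()/gettimeofday() calls can be satisfied in userspace instead of via a syscall. A minimal sketch follows; the cross-compiler name is only an assumed example of a compat toolchain, and the program itself is ordinary POSIX C, not kernel code.

/*
 * Build with a 32-bit toolchain, e.g. (assumed environment):
 *   arm-linux-gnueabihf-gcc -O2 -o vdso-demo vdso-demo.c
 * With the compat vDSO mapped, glibc can service clock_gettime()
 * without entering the kernel.
 */
#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec ts;

	if (clock_gettime(CLOCK_MONOTONIC, &ts) != 0) {
		perror("clock_gettime");
		return 1;
	}
	printf("monotonic: %lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);
	return 0;
}
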
index 84a3d502c5a535786aa8783fcc84a021bc156426..2c0238ce05515a0ff726ad625964a4429799579f 100644 (file)
@@ -53,22 +53,6 @@ $(warning Detected assembler with broken .inst; disassembly will be unreliable)
   endif
 endif
 
-ifeq ($(CONFIG_GENERIC_COMPAT_VDSO), y)
-  CROSS_COMPILE_COMPAT ?= $(CONFIG_CROSS_COMPILE_COMPAT_VDSO:"%"=%)
-
-  ifeq ($(CONFIG_CC_IS_CLANG), y)
-    $(warning CROSS_COMPILE_COMPAT is clang, the compat vDSO will not be built)
-  else ifeq ($(strip $(CROSS_COMPILE_COMPAT)),)
-    $(warning CROSS_COMPILE_COMPAT not defined or empty, the compat vDSO will not be built)
-  else ifeq ($(shell which $(CROSS_COMPILE_COMPAT)gcc 2> /dev/null),)
-    $(error $(CROSS_COMPILE_COMPAT)gcc not found, check CROSS_COMPILE_COMPAT)
-  else
-    export CROSS_COMPILE_COMPAT
-    export CONFIG_COMPAT_VDSO := y
-    compat_vdso := -DCONFIG_COMPAT_VDSO=1
-  endif
-endif
-
 KBUILD_CFLAGS  += -mgeneral-regs-only $(lseinstr) $(brokengasinst)     \
                   $(compat_vdso) $(cc_has_k_constraint)
 KBUILD_CFLAGS  += -fno-asynchronous-unwind-tables
index 24f1aac366d64355f5b6b37bb8e263bcce7f2e2d..d5b6e8159a335a0fde372e68f84e2101fc448560 100644 (file)
                reg = <1>;
        };
 };
+
+&reg_dc1sw {
+       /*
+        * The Ethernet PHY needs 30ms to properly power up and some more
+        * to initialize. 100ms should be plenty of time to finish the
+        * whole process.
+        */
+       regulator-enable-ramp-delay = <100000>;
+};
index 2b6345db7dc099cdb6534f340e4e2fef12fe4f43..78c82a665c84a11c9de7a33e40e4aab67a98e792 100644 (file)
 
 &ehci0 {
        phys = <&usbphy 0>;
+       phy-names = "usb";
        status = "okay";
 };
 
 
 &ohci0 {
        phys = <&usbphy 0>;
+       phy-names = "usb";
        status = "okay";
 };
 
index e6fb9683f21355ea7e3a947b20b3ea4fc7900d61..25099202c52c99d37b06ca3127dcc9b9b8214b4b 100644 (file)
 };
 
 &reg_dc1sw {
+       /*
+        * The Ethernet PHY needs 30ms to properly power up and some more
+        * to initialize. 100ms should be plenty of time to finish the
+        * whole process.
+        */
+       regulator-enable-ramp-delay = <100000>;
        regulator-name = "vcc-phy";
 };
 
index 69128a6dfc46f0a2e45d1acd02380192b9905da7..70f4cce6be43e1707aa5b0a3b682c281683d1857 100644 (file)
                clock-output-names = "ext-osc32k";
        };
 
-       pmu {
-               compatible = "arm,cortex-a53-pmu";
-               interrupts = <GIC_SPI 152 IRQ_TYPE_LEVEL_HIGH>,
-                            <GIC_SPI 153 IRQ_TYPE_LEVEL_HIGH>,
-                            <GIC_SPI 154 IRQ_TYPE_LEVEL_HIGH>,
-                            <GIC_SPI 155 IRQ_TYPE_LEVEL_HIGH>;
-               interrupt-affinity = <&cpu0>, <&cpu1>, <&cpu2>, <&cpu3>;
-       };
-
        psci {
                compatible = "arm,psci-0.2";
                method = "smc";
                        resets = <&ccu RST_BUS_OHCI1>,
                                 <&ccu RST_BUS_EHCI1>;
                        phys = <&usbphy 1>;
+                       phy-names = "usb";
                        status = "disabled";
                };
 
                                 <&ccu CLK_USB_OHCI1>;
                        resets = <&ccu RST_BUS_OHCI1>;
                        phys = <&usbphy 1>;
+                       phy-names = "usb";
                        status = "disabled";
                };
 
index 4020a1aafa3e33bdd79caba258751bb19091be89..0d5ea19336a19d9d5460751a5b4728fa300d324a 100644 (file)
                        resets = <&ccu RST_BUS_OHCI3>,
                                 <&ccu RST_BUS_EHCI3>;
                        phys = <&usb2phy 3>;
+                       phy-names = "usb";
                        status = "disabled";
                };
 
                                 <&ccu CLK_USB_OHCI3>;
                        resets = <&ccu RST_BUS_OHCI3>;
                        phys = <&usb2phy 3>;
+                       phy-names = "usb";
                        status = "disabled";
                };
 
index 8a3a770e8f2ce62bb99fc9fd74461073cfa7780a..56789ccf94545f39cde28c34f9dba8af495322a7 100644 (file)
 
                pinmux: pinmux@14029c {
                        compatible = "pinctrl-single";
-                       reg = <0x0014029c 0x250>;
+                       reg = <0x0014029c 0x26c>;
                        #address-cells = <1>;
                        #size-cells = <1>;
                        pinctrl-single,register-width = <32>;
                        pinctrl-single,function-mask = <0xf>;
                        pinctrl-single,gpio-range = <
-                               &range 0 154 MODE_GPIO
+                               &range 0  91 MODE_GPIO
+                               &range 95 60 MODE_GPIO
                                >;
                        range: gpio-range {
                                #pinctrl-single,gpio-range-cells = <3>;
index 71e2e34400d40dab63f864f347bb1203ec6873eb..0098dfdef96c0285db363155f0bdb72706193d1c 100644 (file)
                                        <&pinmux 108 16 27>,
                                        <&pinmux 135 77 6>,
                                        <&pinmux 141 67 4>,
-                                       <&pinmux 145 149 6>,
-                                       <&pinmux 151 91 4>;
+                                       <&pinmux 145 149 6>;
                };
 
                i2c1: i2c@e0000 {
index 408e0ecdce6a5b2024fb89bed1cc7902dafb5180..b032f3890c8c61c0b4e8fa92e94ef57981a18118 100644 (file)
@@ -33,7 +33,7 @@
                        i-cache-line-size = <64>;
                        i-cache-sets = <192>;
                        next-level-cache = <&cluster0_l2>;
-                       cpu-idle-states = <&cpu_pw20>;
+                       cpu-idle-states = <&cpu_pw15>;
                };
 
                cpu@1 {
@@ -49,7 +49,7 @@
                        i-cache-line-size = <64>;
                        i-cache-sets = <192>;
                        next-level-cache = <&cluster0_l2>;
-                       cpu-idle-states = <&cpu_pw20>;
+                       cpu-idle-states = <&cpu_pw15>;
                };
 
                cpu@100 {
@@ -65,7 +65,7 @@
                        i-cache-line-size = <64>;
                        i-cache-sets = <192>;
                        next-level-cache = <&cluster1_l2>;
-                       cpu-idle-states = <&cpu_pw20>;
+                       cpu-idle-states = <&cpu_pw15>;
                };
 
                cpu@101 {
@@ -81,7 +81,7 @@
                        i-cache-line-size = <64>;
                        i-cache-sets = <192>;
                        next-level-cache = <&cluster1_l2>;
-                       cpu-idle-states = <&cpu_pw20>;
+                       cpu-idle-states = <&cpu_pw15>;
                };
 
                cpu@200 {
@@ -97,7 +97,7 @@
                        i-cache-line-size = <64>;
                        i-cache-sets = <192>;
                        next-level-cache = <&cluster2_l2>;
-                       cpu-idle-states = <&cpu_pw20>;
+                       cpu-idle-states = <&cpu_pw15>;
                };
 
                cpu@201 {
                        i-cache-line-size = <64>;
                        i-cache-sets = <192>;
                        next-level-cache = <&cluster2_l2>;
-                       cpu-idle-states = <&cpu_pw20>;
+                       cpu-idle-states = <&cpu_pw15>;
                };
 
                cpu@300 {
                        i-cache-line-size = <64>;
                        i-cache-sets = <192>;
                        next-level-cache = <&cluster3_l2>;
-                       cpu-idle-states = <&cpu_pw20>;
+                       cpu-idle-states = <&cpu_pw15>;
                };
 
                cpu@301 {
                        i-cache-line-size = <64>;
                        i-cache-sets = <192>;
                        next-level-cache = <&cluster3_l2>;
-                       cpu-idle-states = <&cpu_pw20>;
+                       cpu-idle-states = <&cpu_pw15>;
                };
 
                cpu@400 {
                        i-cache-line-size = <64>;
                        i-cache-sets = <192>;
                        next-level-cache = <&cluster4_l2>;
-                       cpu-idle-states = <&cpu_pw20>;
+                       cpu-idle-states = <&cpu_pw15>;
                };
 
                cpu@401 {
                        i-cache-line-size = <64>;
                        i-cache-sets = <192>;
                        next-level-cache = <&cluster4_l2>;
-                       cpu-idle-states = <&cpu_pw20>;
+                       cpu-idle-states = <&cpu_pw15>;
                };
 
                cpu@500 {
                        i-cache-line-size = <64>;
                        i-cache-sets = <192>;
                        next-level-cache = <&cluster5_l2>;
-                       cpu-idle-states = <&cpu_pw20>;
+                       cpu-idle-states = <&cpu_pw15>;
                };
 
                cpu@501 {
                        i-cache-line-size = <64>;
                        i-cache-sets = <192>;
                        next-level-cache = <&cluster5_l2>;
-                       cpu-idle-states = <&cpu_pw20>;
+                       cpu-idle-states = <&cpu_pw15>;
                };
 
                cpu@600 {
                        i-cache-line-size = <64>;
                        i-cache-sets = <192>;
                        next-level-cache = <&cluster6_l2>;
-                       cpu-idle-states = <&cpu_pw20>;
+                       cpu-idle-states = <&cpu_pw15>;
                };
 
                cpu@601 {
                        i-cache-line-size = <64>;
                        i-cache-sets = <192>;
                        next-level-cache = <&cluster6_l2>;
-                       cpu-idle-states = <&cpu_pw20>;
+                       cpu-idle-states = <&cpu_pw15>;
                };
 
                cpu@700 {
                        i-cache-line-size = <64>;
                        i-cache-sets = <192>;
                        next-level-cache = <&cluster7_l2>;
-                       cpu-idle-states = <&cpu_pw20>;
+                       cpu-idle-states = <&cpu_pw15>;
                };
 
                cpu@701 {
                        i-cache-line-size = <64>;
                        i-cache-sets = <192>;
                        next-level-cache = <&cluster7_l2>;
-                       cpu-idle-states = <&cpu_pw20>;
+                       cpu-idle-states = <&cpu_pw15>;
                };
 
                cluster0_l2: l2-cache0 {
                        cache-level = <2>;
                };
 
-               cpu_pw20: cpu-pw20 {
+               cpu_pw15: cpu-pw15 {
                        compatible = "arm,idle-state";
-                       idle-state-name = "PW20";
+                       idle-state-name = "PW15";
                        arm,psci-suspend-param = <0x0>;
                        entry-latency-us = <2000>;
                        exit-latency-us = <2000>;
index 5f9d0da196e13c695ccc340a2f5ab5bc96985f67..58b8cd06cae78e7866f8a3a2d4e3ba0617fa0834 100644 (file)
                                compatible = "fsl,imx8mm-usdhc", "fsl,imx7d-usdhc";
                                reg = <0x30b40000 0x10000>;
                                interrupts = <GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>;
-                               clocks = <&clk IMX8MM_CLK_DUMMY>,
+                               clocks = <&clk IMX8MM_CLK_IPG_ROOT>,
                                         <&clk IMX8MM_CLK_NAND_USDHC_BUS>,
                                         <&clk IMX8MM_CLK_USDHC1_ROOT>;
                                clock-names = "ipg", "ahb", "per";
                                compatible = "fsl,imx8mm-usdhc", "fsl,imx7d-usdhc";
                                reg = <0x30b50000 0x10000>;
                                interrupts = <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>;
-                               clocks = <&clk IMX8MM_CLK_DUMMY>,
+                               clocks = <&clk IMX8MM_CLK_IPG_ROOT>,
                                         <&clk IMX8MM_CLK_NAND_USDHC_BUS>,
                                         <&clk IMX8MM_CLK_USDHC2_ROOT>;
                                clock-names = "ipg", "ahb", "per";
                                compatible = "fsl,imx8mm-usdhc", "fsl,imx7d-usdhc";
                                reg = <0x30b60000 0x10000>;
                                interrupts = <GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>;
-                               clocks = <&clk IMX8MM_CLK_DUMMY>,
+                               clocks = <&clk IMX8MM_CLK_IPG_ROOT>,
                                         <&clk IMX8MM_CLK_NAND_USDHC_BUS>,
                                         <&clk IMX8MM_CLK_USDHC3_ROOT>;
                                clock-names = "ipg", "ahb", "per";
index 785f4c420fa4c518dad3bc0e20ad4528279b96e7..98496f570720407369b0242897efd0fd5207214c 100644 (file)
                                compatible = "fsl,imx8mn-usdhc", "fsl,imx7d-usdhc";
                                reg = <0x30b40000 0x10000>;
                                interrupts = <GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>;
-                               clocks = <&clk IMX8MN_CLK_DUMMY>,
+                               clocks = <&clk IMX8MN_CLK_IPG_ROOT>,
                                         <&clk IMX8MN_CLK_NAND_USDHC_BUS>,
                                         <&clk IMX8MN_CLK_USDHC1_ROOT>;
                                clock-names = "ipg", "ahb", "per";
                                compatible = "fsl,imx8mn-usdhc", "fsl,imx7d-usdhc";
                                reg = <0x30b50000 0x10000>;
                                interrupts = <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>;
-                               clocks = <&clk IMX8MN_CLK_DUMMY>,
+                               clocks = <&clk IMX8MN_CLK_IPG_ROOT>,
                                         <&clk IMX8MN_CLK_NAND_USDHC_BUS>,
                                         <&clk IMX8MN_CLK_USDHC2_ROOT>;
                                clock-names = "ipg", "ahb", "per";
                                compatible = "fsl,imx8mn-usdhc", "fsl,imx7d-usdhc";
                                reg = <0x30b60000 0x10000>;
                                interrupts = <GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>;
-                               clocks = <&clk IMX8MN_CLK_DUMMY>,
+                               clocks = <&clk IMX8MN_CLK_IPG_ROOT>,
                                         <&clk IMX8MN_CLK_NAND_USDHC_BUS>,
                                         <&clk IMX8MN_CLK_USDHC3_ROOT>;
                                clock-names = "ipg", "ahb", "per";
index af99473ada04377fd32af9304f5eeac8d74b1fba..087b5b6ebe8906f932b827ca1d5261cf78c4427b 100644 (file)
@@ -89,8 +89,8 @@
                regulator-min-microvolt = <900000>;
                regulator-max-microvolt = <1000000>;
                gpios = <&gpio3 19 GPIO_ACTIVE_HIGH>;
-               states = <1000000 0x0
-                          900000 0x1>;
+               states = <1000000 0x1
+                          900000 0x0>;
                regulator-always-on;
        };
 };
index 04115ca6bfb528c40a7f7cf207c57d2377c1a1ba..55a3d1c4bdf04cfc8df24adc230d23a0702c0180 100644 (file)
                                             "fsl,imx7d-usdhc";
                                reg = <0x30b40000 0x10000>;
                                interrupts = <GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>;
-                               clocks = <&clk IMX8MQ_CLK_DUMMY>,
+                               clocks = <&clk IMX8MQ_CLK_IPG_ROOT>,
                                         <&clk IMX8MQ_CLK_NAND_USDHC_BUS>,
                                         <&clk IMX8MQ_CLK_USDHC1_ROOT>;
                                clock-names = "ipg", "ahb", "per";
                                             "fsl,imx7d-usdhc";
                                reg = <0x30b50000 0x10000>;
                                interrupts = <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>;
-                               clocks = <&clk IMX8MQ_CLK_DUMMY>,
+                               clocks = <&clk IMX8MQ_CLK_IPG_ROOT>,
                                         <&clk IMX8MQ_CLK_NAND_USDHC_BUS>,
                                         <&clk IMX8MQ_CLK_USDHC2_ROOT>;
                                clock-names = "ipg", "ahb", "per";
index d105986c6be1a6f0a63f511b2bcd8c1d24fc1d3e..5f350cc71a2fdb08e1e7c589ef8d51267b82576f 100644 (file)
                gpio = <&gpiosb 0 GPIO_ACTIVE_HIGH>;
        };
 
-       usb3_phy: usb3-phy {
-               compatible = "usb-nop-xceiv";
-               vcc-supply = <&exp_usb3_vbus>;
-       };
-
        vsdc_reg: vsdc-reg {
                compatible = "regulator-gpio";
                regulator-name = "vsdc";
        status = "okay";
 };
 
+&comphy2 {
+       connector {
+               compatible = "usb-a-connector";
+               phy-supply = <&exp_usb3_vbus>;
+       };
+};
+
 &usb3 {
        status = "okay";
        phys = <&comphy2 0>;
-       usb-phy = <&usb3_phy>;
 };
 
 &mdio {
index e152b0ca0290c7f69e86f6b3c96a7ed16c5e3091..b8066868a3fe6ba79354040e20770e7c1c62a826 100644 (file)
@@ -44,7 +44,7 @@
                power-supply = <&pp3300_disp>;
 
                panel-timing {
-                       clock-frequency = <266604720>;
+                       clock-frequency = <266666667>;
                        hactive = <2400>;
                        hfront-porch = <48>;
                        hback-porch = <84>;
index 0d1f5f9a0de9558733446b2f9dec48ca12e58501..c133e8d64b2a3d2dfac63fedb401657ae7b7f380 100644 (file)
        status = "okay";
 
        u2phy0_host: host-port {
-               phy-supply = <&vcc5v0_host>;
+               phy-supply = <&vcc5v0_typec>;
                status = "okay";
        };
 
 
 &usbdrd_dwc3_0 {
        status = "okay";
-       dr_mode = "otg";
+       dr_mode = "host";
 };
 
 &usbdrd3_1 {
index 0401d4ec1f4527067485d864b183fb6103962624..e544deb61d288285610a2d28aea8ecf1a40adc17 100644 (file)
                regulator-always-on;
                regulator-boot-on;
                regulator-min-microvolt = <800000>;
-               regulator-max-microvolt = <1400000>;
+               regulator-max-microvolt = <1700000>;
                vin-supply = <&vcc5v0_sys>;
        };
 };
        rk808: pmic@1b {
                compatible = "rockchip,rk808";
                reg = <0x1b>;
-               interrupt-parent = <&gpio1>;
-               interrupts = <21 IRQ_TYPE_LEVEL_LOW>;
+               interrupt-parent = <&gpio3>;
+               interrupts = <10 IRQ_TYPE_LEVEL_LOW>;
                #clock-cells = <1>;
                clock-output-names = "xin32k", "rk808-clkout2";
                pinctrl-names = "default";
 
        pmic {
                pmic_int_l: pmic-int-l {
-                       rockchip,pins = <1 RK_PC5 RK_FUNC_GPIO &pcfg_pull_up>;
+                       rockchip,pins = <3 RK_PB2 RK_FUNC_GPIO &pcfg_pull_up>;
                };
 
                vsel1_gpio: vsel1-gpio {
 
 &sdmmc {
        bus-width = <4>;
-       cap-mmc-highspeed;
        cap-sd-highspeed;
        cd-gpios = <&gpio0 7 GPIO_ACTIVE_LOW>;
        disable-wp;
 
 &sdhci {
        bus-width = <8>;
-       mmc-hs400-1_8v;
-       mmc-hs400-enhanced-strobe;
+       mmc-hs200-1_8v;
        non-removable;
        status = "okay";
 };
index 8e05c39eab08aae420357e7714ce0e140a7c4947..c9a867ac32d48d7cd04c17e499afd7e97727df68 100644 (file)
@@ -723,7 +723,7 @@ CONFIG_TEGRA_IOMMU_SMMU=y
 CONFIG_ARM_SMMU=y
 CONFIG_ARM_SMMU_V3=y
 CONFIG_QCOM_IOMMU=y
-CONFIG_REMOTEPROC=m
+CONFIG_REMOTEPROC=y
 CONFIG_QCOM_Q6V5_MSS=m
 CONFIG_QCOM_Q6V5_PAS=m
 CONFIG_QCOM_SYSMON=m
index f74909ba29bdd30ca420be3f98acd72a2113a2cd..5bf963830b173151f4e2983ecb4481848917c85b 100644 (file)
@@ -78,10 +78,9 @@ alternative_else_nop_endif
 /*
  * Remove the address tag from a virtual address, if present.
  */
-       .macro  clear_address_tag, dst, addr
-       tst     \addr, #(1 << 55)
-       bic     \dst, \addr, #(0xff << 56)
-       csel    \dst, \dst, \addr, eq
+       .macro  untagged_addr, dst, addr
+       sbfx    \dst, \addr, #0, #56
+       and     \dst, \dst, \addr
        .endm
 
 #endif
index c6bd87d2915b4de23210ab3eba9eee4b249377db..574808b9df4c8976db12fce41519da3365c33ead 100644 (file)
@@ -321,7 +321,8 @@ static inline s64 __lse_atomic64_dec_if_positive(atomic64_t *v)
 }
 
 #define __CMPXCHG_CASE(w, sfx, name, sz, mb, cl...)                    \
-static inline u##sz __lse__cmpxchg_case_##name##sz(volatile void *ptr, \
+static __always_inline u##sz                                           \
+__lse__cmpxchg_case_##name##sz(volatile void *ptr,                     \
                                              u##sz old,                \
                                              u##sz new)                \
 {                                                                      \
@@ -362,7 +363,8 @@ __CMPXCHG_CASE(x,  ,  mb_, 64, al, "memory")
 #undef __CMPXCHG_CASE
 
 #define __CMPXCHG_DBL(name, mb, cl...)                                 \
-static inline long __lse__cmpxchg_double##name(unsigned long old1,     \
+static __always_inline long                                            \
+__lse__cmpxchg_double##name(unsigned long old1,                                \
                                         unsigned long old2,            \
                                         unsigned long new1,            \
                                         unsigned long new2,            \
index f19fe4b9acc4d148c6e9ebb535be0b03e6f9b352..ac1dbca3d0cd3db0dc5396d05d38eb7dfb679da0 100644 (file)
@@ -52,7 +52,9 @@
 #define ARM64_HAS_IRQ_PRIO_MASKING             42
 #define ARM64_HAS_DCPODP                       43
 #define ARM64_WORKAROUND_1463225               44
+#define ARM64_WORKAROUND_CAVIUM_TX2_219_TVM    45
+#define ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM   46
 
-#define ARM64_NCAPS                            45
+#define ARM64_NCAPS                            47
 
 #endif /* __ASM_CPUCAPS_H */
index b1454d117cd2c69582d519f123fcaa66f5f81c44..aca07c2f6e6e364e84efb9708f8a743cf24e6627 100644 (file)
@@ -79,6 +79,7 @@
 #define CAVIUM_CPU_PART_THUNDERX_83XX  0x0A3
 #define CAVIUM_CPU_PART_THUNDERX2      0x0AF
 
+#define BRCM_CPU_PART_BRAHMA_B53       0x100
 #define BRCM_CPU_PART_VULCAN           0x516
 
 #define QCOM_CPU_PART_FALKOR_V1                0x800
 #define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
 #define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX)
 #define MIDR_CAVIUM_THUNDERX2 MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX2)
+#define MIDR_BRAHMA_B53 MIDR_CPU_MODEL(ARM_CPU_IMP_BRCM, BRCM_CPU_PART_BRAHMA_B53)
 #define MIDR_BRCM_VULCAN MIDR_CPU_MODEL(ARM_CPU_IMP_BRCM, BRCM_CPU_PART_VULCAN)
 #define MIDR_QCOM_FALKOR_V1 MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR_V1)
 #define MIDR_QCOM_FALKOR MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR)
index 86825aa2085221eb68069d7a763456c9ad5312e6..97f21cc666579c350ca625f26e4e84012bd77096 100644 (file)
 #define read_sysreg_el2(r)     read_sysreg_elx(r, _EL2, _EL1)
 #define write_sysreg_el2(v,r)  write_sysreg_elx(v, r, _EL2, _EL1)
 
-/**
- * hyp_alternate_select - Generates patchable code sequences that are
- * used to switch between two implementations of a function, depending
- * on the availability of a feature.
- *
- * @fname: a symbol name that will be defined as a function returning a
- * function pointer whose type will match @orig and @alt
- * @orig: A pointer to the default function, as returned by @fname when
- * @cond doesn't hold
- * @alt: A pointer to the alternate function, as returned by @fname
- * when @cond holds
- * @cond: a CPU feature (as described in asm/cpufeature.h)
- */
-#define hyp_alternate_select(fname, orig, alt, cond)                   \
-typeof(orig) * __hyp_text fname(void)                                  \
-{                                                                      \
-       typeof(alt) *val = orig;                                        \
-       asm volatile(ALTERNATIVE("nop           \n",                    \
-                                "mov   %0, %1  \n",                    \
-                                cond)                                  \
-                    : "+r" (val) : "r" (alt));                         \
-       return val;                                                     \
-}
-
 int __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu);
 
 void __vgic_v3_save_state(struct kvm_vcpu *vcpu);
index b61b50bf68b1893024e997d268fbbb3c91153d91..c23c473606647521673ee7b2274a249d1688f125 100644 (file)
@@ -215,12 +215,18 @@ static inline unsigned long kaslr_offset(void)
  * up with a tagged userland pointer. Clear the tag to get a sane pointer to
  * pass on to access_ok(), for instance.
  */
-#define untagged_addr(addr)    \
+#define __untagged_addr(addr)  \
        ((__force __typeof__(addr))sign_extend64((__force u64)(addr), 55))
 
+#define untagged_addr(addr)    ({                                      \
+       u64 __addr = (__force u64)addr;                                 \
+       __addr &= __untagged_addr(__addr);                              \
+       (__force __typeof__(addr))__addr;                               \
+})
+
 #ifdef CONFIG_KASAN_SW_TAGS
 #define __tag_shifted(tag)     ((u64)(tag) << 56)
-#define __tag_reset(addr)      untagged_addr(addr)
+#define __tag_reset(addr)      __untagged_addr(addr)
 #define __tag_get(addr)                (__u8)((u64)(addr) >> 56)
 #else
 #define __tag_shifted(tag)     0UL
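
The pairing above is subtle: sign_extend64(addr, 55) propagates bit 55 into the top byte, so ANDing the result back into the original address clears the tag on user (TTBR0) pointers while leaving kernel (TTBR1) pointers, whose top byte is already all ones, untouched. A rough stand-alone model of that arithmetic (plain userspace C with made-up example addresses, not part of the patch):

#include <stdint.h>
#include <stdio.h>

/* Same arithmetic as the kernel's sign_extend64(value, 55). */
static uint64_t sign_extend_bit55(uint64_t value)
{
	return (uint64_t)((int64_t)(value << 8) >> 8);
}

static uint64_t untag(uint64_t addr)
{
	return addr & sign_extend_bit55(addr);
}

int main(void)
{
	uint64_t user = 0x3c00007f12345678ULL;	/* tagged user VA, bit 55 clear */
	uint64_t kern = 0xffff000012345678ULL;	/* kernel VA, bit 55 set */

	printf("%016llx -> %016llx\n", (unsigned long long)user,
	       (unsigned long long)untag(user));	/* tag byte cleared */
	printf("%016llx -> %016llx\n", (unsigned long long)kern,
	       (unsigned long long)untag(kern));	/* unchanged */
	return 0;
}
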
index 9a21b84536f2e04bdf0698e20dc52aa81d952783..8dc6c5cdabe62e2f63065abd525ff516fe685387 100644 (file)
 #define PROT_DEFAULT           (_PROT_DEFAULT | PTE_MAYBE_NG)
 #define PROT_SECT_DEFAULT      (_PROT_SECT_DEFAULT | PMD_MAYBE_NG)
 
-#define PROT_DEVICE_nGnRnE     (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRnE))
-#define PROT_DEVICE_nGnRE      (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRE))
-#define PROT_NORMAL_NC         (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_NC))
-#define PROT_NORMAL_WT         (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_WT))
-#define PROT_NORMAL            (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL))
+#define PROT_DEVICE_nGnRnE     (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRnE))
+#define PROT_DEVICE_nGnRE      (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRE))
+#define PROT_NORMAL_NC         (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_NC))
+#define PROT_NORMAL_WT         (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_WT))
+#define PROT_NORMAL            (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL))
 
 #define PROT_SECT_DEVICE_nGnRE (PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_DEVICE_nGnRE))
 #define PROT_SECT_NORMAL       (PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL))
@@ -80,8 +80,9 @@
 #define PAGE_S2_DEVICE         __pgprot(_PROT_DEFAULT | PAGE_S2_MEMATTR(DEVICE_nGnRE) | PTE_S2_RDONLY | PTE_S2_XN)
 
 #define PAGE_NONE              __pgprot(((_PAGE_DEFAULT) & ~PTE_VALID) | PTE_PROT_NONE | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN)
-#define PAGE_SHARED            __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE)
-#define PAGE_SHARED_EXEC       __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_WRITE)
+/* shared+writable pages are clean by default, hence PTE_RDONLY|PTE_WRITE */
+#define PAGE_SHARED            __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE)
+#define PAGE_SHARED_EXEC       __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_WRITE)
 #define PAGE_READONLY          __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN)
 #define PAGE_READONLY_EXEC     __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN)
 #define PAGE_EXECONLY          __pgprot(_PAGE_DEFAULT | PTE_RDONLY | PTE_NG | PTE_PXN)
index 7576df00eb50e8f7109d711846f758369a50eb85..8330810f699e31bd01794f8dc5ad17dd419646f6 100644 (file)
@@ -876,9 +876,6 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
 
 #define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)
 
-#define kc_vaddr_to_offset(v)  ((v) & ~PAGE_END)
-#define kc_offset_to_vaddr(o)  ((o) | PAGE_END)
-
 #ifdef CONFIG_ARM64_PA_BITS_52
 #define phys_to_ttbr(addr)     (((addr) | ((addr) >> 46)) & TTBR_BADDR_MASK_52)
 #else
index 972d196c7714689ab46d22b6366453b7e018c721..6e919fafb43dd65b407a3ec537408b240319ce7d 100644 (file)
 #define SYS_FAR_EL1                    sys_reg(3, 0, 6, 0, 0)
 #define SYS_PAR_EL1                    sys_reg(3, 0, 7, 4, 0)
 
-#define SYS_PAR_EL1_F                  BIT(1)
+#define SYS_PAR_EL1_F                  BIT(0)
 #define SYS_PAR_EL1_FST                        GENMASK(6, 1)
 
 /*** Statistical Profiling Extension ***/
index fb60a88b5ed41f09ab6a89f3e8a8dc227a73b8c3..3fd8fd6d8fc253447fc995a40d10be38acd0b191 100644 (file)
@@ -20,7 +20,7 @@
 
 #define dmb(option) __asm__ __volatile__ ("dmb " #option : : : "memory")
 
-#if __LINUX_ARM_ARCH__ >= 8
+#if __LINUX_ARM_ARCH__ >= 8 && defined(CONFIG_AS_DMB_ISHLD)
 #define aarch32_smp_mb()       dmb(ish)
 #define aarch32_smp_rmb()      dmb(ishld)
 #define aarch32_smp_wmb()      dmb(ishst)
diff --git a/arch/arm64/include/asm/vdso_datapage.h b/arch/arm64/include/asm/vdso_datapage.h
deleted file mode 100644 (file)
index 1f38bf3..0000000
+++ /dev/null
@@ -1,33 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2012 ARM Limited
- */
-#ifndef __ASM_VDSO_DATAPAGE_H
-#define __ASM_VDSO_DATAPAGE_H
-
-#ifndef __ASSEMBLY__
-
-struct vdso_data {
-       __u64 cs_cycle_last;    /* Timebase at clocksource init */
-       __u64 raw_time_sec;     /* Raw time */
-       __u64 raw_time_nsec;
-       __u64 xtime_clock_sec;  /* Kernel time */
-       __u64 xtime_clock_nsec;
-       __u64 xtime_coarse_sec; /* Coarse time */
-       __u64 xtime_coarse_nsec;
-       __u64 wtm_clock_sec;    /* Wall to monotonic time */
-       __u64 wtm_clock_nsec;
-       __u32 tb_seq_count;     /* Timebase sequence counter */
-       /* cs_* members must be adjacent and in this order (ldp accesses) */
-       __u32 cs_mono_mult;     /* NTP-adjusted clocksource multiplier */
-       __u32 cs_shift;         /* Clocksource shift (mono = raw) */
-       __u32 cs_raw_mult;      /* Raw clocksource multiplier */
-       __u32 tz_minuteswest;   /* Whacky timezone stuff */
-       __u32 tz_dsttime;
-       __u32 use_syscall;
-       __u32 hrtimer_res;
-};
-
-#endif /* !__ASSEMBLY__ */
-
-#endif /* __ASM_VDSO_DATAPAGE_H */
diff --git a/arch/arm64/include/asm/xen/xen-ops.h b/arch/arm64/include/asm/xen/xen-ops.h
deleted file mode 100644 (file)
index e6e7840..0000000
+++ /dev/null
@@ -1,7 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_XEN_OPS_H
-#define _ASM_XEN_OPS_H
-
-void xen_efi_runtime_setup(void);
-
-#endif /* _ASM_XEN_OPS_H */
index 2ec09debc2bb1d444f6e1c4281c10a0ffaa12588..ca158be21f833bcaeac18641777dc0709083b624 100644 (file)
@@ -174,6 +174,9 @@ static void __init register_insn_emulation(struct insn_emulation_ops *ops)
        struct insn_emulation *insn;
 
        insn = kzalloc(sizeof(*insn), GFP_KERNEL);
+       if (!insn)
+               return;
+
        insn->ops = ops;
        insn->min = INSN_UNDEF;
 
@@ -233,6 +236,8 @@ static void __init register_insn_emulation_sysctl(void)
 
        insns_sysctl = kcalloc(nr_insn_emulated + 1, sizeof(*sysctl),
                               GFP_KERNEL);
+       if (!insns_sysctl)
+               return;
 
        raw_spin_lock_irqsave(&insn_emulation_lock, flags);
        list_for_each_entry(insn, &insn_emulation, node) {
index 1e43ba5c79b7e4faf475909c9a86c2ad98909229..93f34b4eca2547413c8d49e8a4e735fcac403ba3 100644 (file)
@@ -12,6 +12,7 @@
 #include <asm/cpu.h>
 #include <asm/cputype.h>
 #include <asm/cpufeature.h>
+#include <asm/smp_plat.h>
 
 static bool __maybe_unused
 is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
@@ -128,8 +129,8 @@ static void install_bp_hardening_cb(bp_hardening_cb_t fn,
        int cpu, slot = -1;
 
        /*
-        * enable_smccc_arch_workaround_1() passes NULL for the hyp_vecs
-        * start/end if we're a guest. Skip the hyp-vectors work.
+        * detect_harden_bp_fw() passes NULL for the hyp_vecs start/end if
+        * we're a guest. Skip the hyp-vectors work.
         */
        if (!hyp_vecs_start) {
                __this_cpu_write(bp_hardening_data.fn, fn);
@@ -488,6 +489,7 @@ static const struct midr_range arm64_ssb_cpus[] = {
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
+       MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
        {},
 };
 
@@ -572,6 +574,7 @@ static const struct midr_range spectre_v2_safe_list[] = {
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
+       MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
        { /* sentinel */ }
 };
 
@@ -623,6 +626,30 @@ check_branch_predictor(const struct arm64_cpu_capabilities *entry, int scope)
        return (need_wa > 0);
 }
 
+static const __maybe_unused struct midr_range tx2_family_cpus[] = {
+       MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
+       MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
+       {},
+};
+
+static bool __maybe_unused
+needs_tx2_tvm_workaround(const struct arm64_cpu_capabilities *entry,
+                        int scope)
+{
+       int i;
+
+       if (!is_affected_midr_range_list(entry, scope) ||
+           !is_hyp_mode_available())
+               return false;
+
+       for_each_possible_cpu(i) {
+               if (MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0) != 0)
+                       return true;
+       }
+
+       return false;
+}
+
 #ifdef CONFIG_HARDEN_EL2_VECTORS
 
 static const struct midr_range arm64_harden_el2_vectors[] = {
@@ -634,17 +661,23 @@ static const struct midr_range arm64_harden_el2_vectors[] = {
 #endif
 
 #ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
-
-static const struct midr_range arm64_repeat_tlbi_cpus[] = {
+static const struct arm64_cpu_capabilities arm64_repeat_tlbi_list[] = {
 #ifdef CONFIG_QCOM_FALKOR_ERRATUM_1009
-       MIDR_RANGE(MIDR_QCOM_FALKOR_V1, 0, 0, 0, 0),
+       {
+               ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0)
+       },
+       {
+               .midr_range.model = MIDR_QCOM_KRYO,
+               .matches = is_kryo_midr,
+       },
 #endif
 #ifdef CONFIG_ARM64_ERRATUM_1286807
-       MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 0),
+       {
+               ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 0),
+       },
 #endif
        {},
 };
-
 #endif
 
 #ifdef CONFIG_CAVIUM_ERRATUM_27456
@@ -712,6 +745,33 @@ static const struct midr_range erratum_1418040_list[] = {
 };
 #endif
 
+#ifdef CONFIG_ARM64_ERRATUM_845719
+static const struct midr_range erratum_845719_list[] = {
+       /* Cortex-A53 r0p[01234] */
+       MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
+       /* Brahma-B53 r0p[0] */
+       MIDR_REV(MIDR_BRAHMA_B53, 0, 0),
+       {},
+};
+#endif
+
+#ifdef CONFIG_ARM64_ERRATUM_843419
+static const struct arm64_cpu_capabilities erratum_843419_list[] = {
+       {
+               /* Cortex-A53 r0p[01234] */
+               .matches = is_affected_midr_range,
+               ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
+               MIDR_FIXED(0x4, BIT(8)),
+       },
+       {
+               /* Brahma-B53 r0p[0] */
+               .matches = is_affected_midr_range,
+               ERRATA_MIDR_REV(MIDR_BRAHMA_B53, 0, 0),
+       },
+       {},
+};
+#endif
+
 const struct arm64_cpu_capabilities arm64_errata[] = {
 #ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
        {
@@ -743,19 +803,18 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 #endif
 #ifdef CONFIG_ARM64_ERRATUM_843419
        {
-       /* Cortex-A53 r0p[01234] */
                .desc = "ARM erratum 843419",
                .capability = ARM64_WORKAROUND_843419,
-               ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
-               MIDR_FIXED(0x4, BIT(8)),
+               .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
+               .matches = cpucap_multi_entry_cap_matches,
+               .match_list = erratum_843419_list,
        },
 #endif
 #ifdef CONFIG_ARM64_ERRATUM_845719
        {
-       /* Cortex-A53 r0p[01234] */
                .desc = "ARM erratum 845719",
                .capability = ARM64_WORKAROUND_845719,
-               ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
+               ERRATA_MIDR_RANGE_LIST(erratum_845719_list),
        },
 #endif
 #ifdef CONFIG_CAVIUM_ERRATUM_23154
@@ -791,6 +850,7 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
        {
                .desc = "Qualcomm Technologies Falkor/Kryo erratum 1003",
                .capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
+               .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
                .matches = cpucap_multi_entry_cap_matches,
                .match_list = qcom_erratum_1003_list,
        },
@@ -799,7 +859,9 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
        {
                .desc = "Qualcomm erratum 1009, ARM erratum 1286807",
                .capability = ARM64_WORKAROUND_REPEAT_TLBI,
-               ERRATA_MIDR_RANGE_LIST(arm64_repeat_tlbi_cpus),
+               .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
+               .matches = cpucap_multi_entry_cap_matches,
+               .match_list = arm64_repeat_tlbi_list,
        },
 #endif
 #ifdef CONFIG_ARM64_ERRATUM_858921
@@ -851,6 +913,19 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
                .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
                .matches = has_cortex_a76_erratum_1463225,
        },
+#endif
+#ifdef CONFIG_CAVIUM_TX2_ERRATUM_219
+       {
+               .desc = "Cavium ThunderX2 erratum 219 (KVM guest sysreg trapping)",
+               .capability = ARM64_WORKAROUND_CAVIUM_TX2_219_TVM,
+               ERRATA_MIDR_RANGE_LIST(tx2_family_cpus),
+               .matches = needs_tx2_tvm_workaround,
+       },
+       {
+               .desc = "Cavium ThunderX2 erratum 219 (PRFM removal)",
+               .capability = ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM,
+               ERRATA_MIDR_RANGE_LIST(tx2_family_cpus),
+       },
 #endif
        {
        }
index 9323bcc40a58a7c34d2ec502c35fad4633a5d118..80f459ad0190de8b4a845f440006042be05ec310 100644 (file)
@@ -136,6 +136,7 @@ static const struct arm64_ftr_bits ftr_id_aa64isar0[] = {
 
 static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
        ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_SB_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_FRINTTS_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
                       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_GPI_SHIFT, 4, 0),
        ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
@@ -175,11 +176,16 @@ static const struct arm64_ftr_bits ftr_id_aa64pfr1[] = {
 };
 
 static const struct arm64_ftr_bits ftr_id_aa64zfr0[] = {
-       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_SM4_SHIFT, 4, 0),
-       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_SHA3_SHIFT, 4, 0),
-       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_BITPERM_SHIFT, 4, 0),
-       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_AES_SHIFT, 4, 0),
-       ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_SVEVER_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
+                      FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_SM4_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
+                      FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_SHA3_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
+                      FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_BITPERM_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
+                      FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_AES_SHIFT, 4, 0),
+       ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
+                      FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_SVEVER_SHIFT, 4, 0),
        ARM64_FTR_END,
 };
 
index 84a822748c84e58c85fc19a62d63053b97f0b2c2..cf3bd2976e5747ff5c5b22ee0665700476133816 100644 (file)
@@ -604,7 +604,7 @@ el1_da:
         */
        mrs     x3, far_el1
        inherit_daif    pstate=x23, tmp=x2
-       clear_address_tag x0, x3
+       untagged_addr   x0, x3
        mov     x2, sp                          // struct pt_regs
        bl      do_mem_abort
 
@@ -680,7 +680,7 @@ alternative_if ARM64_HAS_IRQ_PRIO_MASKING
        orr     x24, x24, x0
 alternative_else_nop_endif
        cbnz    x24, 1f                         // preempt count != 0 || NMI return path
-       bl      preempt_schedule_irq            // irq en/disable is done inside
+       bl      arm64_preempt_schedule_irq      // irq en/disable is done inside
 1:
 #endif
 
@@ -775,6 +775,7 @@ el0_sync_compat:
        b.ge    el0_dbg
        b       el0_inv
 el0_svc_compat:
+       gic_prio_kentry_setup tmp=x1
        mov     x0, sp
        bl      el0_svc_compat_handler
        b       ret_to_user
@@ -807,7 +808,7 @@ el0_da:
        mrs     x26, far_el1
        ct_user_exit_irqoff
        enable_daif
-       clear_address_tag x0, x26
+       untagged_addr   x0, x26
        mov     x1, x25
        mov     x2, sp
        bl      do_mem_abort
@@ -1070,7 +1071,9 @@ alternative_insn isb, nop, ARM64_WORKAROUND_QCOM_FALKOR_E1003
 #else
        ldr     x30, =vectors
 #endif
+alternative_if_not ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM
        prfm    plil1strm, [x30, #(1b - tramp_vectors)]
+alternative_else_nop_endif
        msr     vbar_el1, x30
        add     x30, x30, #(1b - tramp_vectors)
        isb
index 17177325797420cea45062f349f5dde71b72119b..06e56b47031539fdd083ab365e59697e20144dfb 100644 (file)
@@ -121,10 +121,16 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 
                        /*
                         * Ensure updated trampoline is visible to instruction
-                        * fetch before we patch in the branch.
+                        * fetch before we patch in the branch. Although the
+                        * architecture doesn't require an IPI in this case,
+                        * Neoverse-N1 erratum #1542419 does require one
+                        * if the TLB maintenance in module_enable_ro() is
+                        * skipped due to rodata_enabled. It doesn't seem worth
+                        * it to make it conditional given that this is
+                        * certainly not a fast-path.
                         */
-                       __flush_icache_range((unsigned long)&dst[0],
-                                            (unsigned long)&dst[1]);
+                       flush_icache_range((unsigned long)&dst[0],
+                                          (unsigned long)&dst[1]);
                }
                addr = (unsigned long)dst;
 #else /* CONFIG_ARM64_MODULE_PLTS */
index e0a7fce0e01c1d5533a6dd19fed7431dd3fa3ed5..a96b2921d22c2f12b4fdbb1bb1a4b918d8bd66c5 100644 (file)
@@ -201,6 +201,7 @@ static int create_safe_exec_page(void *src_start, size_t length,
                                 gfp_t mask)
 {
        int rc = 0;
+       pgd_t *trans_pgd;
        pgd_t *pgdp;
        pud_t *pudp;
        pmd_t *pmdp;
@@ -215,7 +216,13 @@ static int create_safe_exec_page(void *src_start, size_t length,
        memcpy((void *)dst, src_start, length);
        __flush_icache_range(dst, dst + length);
 
-       pgdp = pgd_offset_raw(allocator(mask), dst_addr);
+       trans_pgd = allocator(mask);
+       if (!trans_pgd) {
+               rc = -ENOMEM;
+               goto out;
+       }
+
+       pgdp = pgd_offset_raw(trans_pgd, dst_addr);
        if (pgd_none(READ_ONCE(*pgdp))) {
                pudp = allocator(mask);
                if (!pudp) {
index a47462def04bf5d67acc21f43714198603d1f295..71f788cd2b18772629c4627be979f75753253d08 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/sched/task.h>
 #include <linux/sched/task_stack.h>
 #include <linux/kernel.h>
+#include <linux/lockdep.h>
 #include <linux/mm.h>
 #include <linux/stddef.h>
 #include <linux/sysctl.h>
@@ -44,6 +45,7 @@
 #include <asm/alternative.h>
 #include <asm/arch_gicv3.h>
 #include <asm/compat.h>
+#include <asm/cpufeature.h>
 #include <asm/cacheflush.h>
 #include <asm/exec.h>
 #include <asm/fpsimd.h>
@@ -332,22 +334,27 @@ void arch_release_task_struct(struct task_struct *tsk)
        fpsimd_release_task(tsk);
 }
 
-/*
- * src and dst may temporarily have aliased sve_state after task_struct
- * is copied.  We cannot fix this properly here, because src may have
- * live SVE state and dst's thread_info may not exist yet, so tweaking
- * either src's or dst's TIF_SVE is not safe.
- *
- * The unaliasing is done in copy_thread() instead.  This works because
- * dst is not schedulable or traceable until both of these functions
- * have been called.
- */
 int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
 {
        if (current->mm)
                fpsimd_preserve_current_state();
        *dst = *src;
 
+       /* We rely on the above assignment to initialize dst's thread_flags: */
+       BUILD_BUG_ON(!IS_ENABLED(CONFIG_THREAD_INFO_IN_TASK));
+
+       /*
+        * Detach src's sve_state (if any) from dst so that it does not
+        * get erroneously used or freed prematurely.  dst's sve_state
+        * will be allocated on demand later on if dst uses SVE.
+        * For consistency, also clear TIF_SVE here: this could be done
+        * later in copy_process(), but to avoid tripping up future
+        * maintainers it is best not to leave TIF_SVE and sve_state in
+        * an inconsistent state, even temporarily.
+        */
+       dst->thread.sve_state = NULL;
+       clear_tsk_thread_flag(dst, TIF_SVE);
+
        return 0;
 }
 
@@ -360,13 +367,6 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
 
        memset(&p->thread.cpu_context, 0, sizeof(struct cpu_context));
 
-       /*
-        * Unalias p->thread.sve_state (if any) from the parent task
-        * and disable discard SVE state for p:
-        */
-       clear_tsk_thread_flag(p, TIF_SVE);
-       p->thread.sve_state = NULL;
-
        /*
         * In case p was allocated the same task_struct pointer as some
         * other recently-exited task, make sure p is disassociated from
@@ -633,3 +633,19 @@ static int __init tagged_addr_init(void)
 
 core_initcall(tagged_addr_init);
 #endif /* CONFIG_ARM64_TAGGED_ADDR_ABI */
+
+asmlinkage void __sched arm64_preempt_schedule_irq(void)
+{
+       lockdep_assert_irqs_disabled();
+
+       /*
+        * Preempting a task from an IRQ means we leave copies of PSTATE
+        * on the stack. cpufeature's enable calls may modify PSTATE, but
+        * resuming one of these preempted tasks would undo those changes.
+        *
+        * Only allow a task to be preempted once cpufeatures have been
+        * enabled.
+        */
+       if (static_branch_likely(&arm64_const_caps_ready))
+               preempt_schedule_irq();
+}
diff --git a/arch/arm64/kernel/vdso/gettimeofday.S b/arch/arm64/kernel/vdso/gettimeofday.S
deleted file mode 100644 (file)
index e69de29..0000000
index 1fba0776ed40edca833e7cabe936dfb909ae91a1..76b327f88fbb16d8b53c56efa741ae3623691a7e 100644 (file)
@@ -8,15 +8,21 @@
 ARCH_REL_TYPE_ABS := R_ARM_JUMP_SLOT|R_ARM_GLOB_DAT|R_ARM_ABS32
 include $(srctree)/lib/vdso/Makefile
 
-COMPATCC := $(CROSS_COMPILE_COMPAT)gcc
+# Same as cc-*option, but using CC_COMPAT instead of CC
+ifeq ($(CONFIG_CC_IS_CLANG), y)
+CC_COMPAT ?= $(CC)
+else
+CC_COMPAT ?= $(CROSS_COMPILE_COMPAT)gcc
+endif
 
-# Same as cc-*option, but using COMPATCC instead of CC
 cc32-option = $(call try-run,\
-        $(COMPATCC) $(1) -c -x c /dev/null -o "$$TMP",$(1),$(2))
+        $(CC_COMPAT) $(1) -c -x c /dev/null -o "$$TMP",$(1),$(2))
 cc32-disable-warning = $(call try-run,\
-       $(COMPATCC) -W$(strip $(1)) -c -x c /dev/null -o "$$TMP",-Wno-$(strip $(1)))
+       $(CC_COMPAT) -W$(strip $(1)) -c -x c /dev/null -o "$$TMP",-Wno-$(strip $(1)))
 cc32-ldoption = $(call try-run,\
-        $(COMPATCC) $(1) -nostdlib -x c /dev/null -o "$$TMP",$(1),$(2))
+        $(CC_COMPAT) $(1) -nostdlib -x c /dev/null -o "$$TMP",$(1),$(2))
+cc32-as-instr = $(call try-run,\
+       printf "%b\n" "$(1)" | $(CC_COMPAT) $(VDSO_AFLAGS) -c -x assembler -o "$$TMP" -,$(2),$(3))
 
 # We cannot use the global flags to compile the vDSO files, the main reason
 # being that the 32-bit compiler may be older than the main (64-bit) compiler
@@ -25,22 +31,21 @@ cc32-ldoption = $(call try-run,\
 # arm64 one.
 # As a result we set our own flags here.
 
-# From top-level Makefile
-# NOSTDINC_FLAGS
-VDSO_CPPFLAGS := -nostdinc -isystem $(shell $(COMPATCC) -print-file-name=include)
+# KBUILD_CPPFLAGS and NOSTDINC_FLAGS from top-level Makefile
+VDSO_CPPFLAGS := -D__KERNEL__ -nostdinc -isystem $(shell $(CC_COMPAT) -print-file-name=include)
 VDSO_CPPFLAGS += $(LINUXINCLUDE)
-VDSO_CPPFLAGS += $(KBUILD_CPPFLAGS)
 
 # Common C and assembly flags
 # From top-level Makefile
 VDSO_CAFLAGS := $(VDSO_CPPFLAGS)
+ifneq ($(shell $(CC_COMPAT) --version 2>&1 | head -n 1 | grep clang),)
+VDSO_CAFLAGS += --target=$(notdir $(CROSS_COMPILE_COMPAT:%-=%))
+endif
+
 VDSO_CAFLAGS += $(call cc32-option,-fno-PIE)
 ifdef CONFIG_DEBUG_INFO
 VDSO_CAFLAGS += -g
 endif
-ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(COMPATCC)), y)
-VDSO_CAFLAGS += -DCC_HAVE_ASM_GOTO
-endif
 
 # From arm Makefile
 VDSO_CAFLAGS += $(call cc32-option,-fno-dwarf2-cfi-asm)
@@ -55,6 +60,7 @@ endif
 VDSO_CAFLAGS += -fPIC -fno-builtin -fno-stack-protector
 VDSO_CAFLAGS += -DDISABLE_BRANCH_PROFILING
 
+
 # Try to compile for ARMv8. If the compiler is too old and doesn't support it,
 # fall back to v7. There is no easy way to check for what architecture the code
 # is being compiled, so define a macro specifying that (see arch/arm/Makefile).
@@ -91,6 +97,12 @@ VDSO_CFLAGS += -Wno-int-to-pointer-cast
 VDSO_AFLAGS := $(VDSO_CAFLAGS)
 VDSO_AFLAGS += -D__ASSEMBLY__
 
+# Check for binutils support for dmb ishld
+dmbinstr := $(call cc32-as-instr,dmb ishld,-DCONFIG_AS_DMB_ISHLD=1)
+
+VDSO_CFLAGS += $(dmbinstr)
+VDSO_AFLAGS += $(dmbinstr)
+
 VDSO_LDFLAGS := $(VDSO_CPPFLAGS)
 # From arm vDSO Makefile
 VDSO_LDFLAGS += -Wl,-Bsymbolic -Wl,--no-undefined -Wl,-soname=linux-vdso.so.1
@@ -159,14 +171,14 @@ quiet_cmd_vdsold_and_vdso_check = LD32    $@
       cmd_vdsold_and_vdso_check = $(cmd_vdsold); $(cmd_vdso_check)
 
 quiet_cmd_vdsold = LD32    $@
-      cmd_vdsold = $(COMPATCC) -Wp,-MD,$(depfile) $(VDSO_LDFLAGS) \
+      cmd_vdsold = $(CC_COMPAT) -Wp,-MD,$(depfile) $(VDSO_LDFLAGS) \
                    -Wl,-T $(filter %.lds,$^) $(filter %.o,$^) -o $@
 quiet_cmd_vdsocc = CC32    $@
-      cmd_vdsocc = $(COMPATCC) -Wp,-MD,$(depfile) $(VDSO_CFLAGS) -c -o $@ $<
+      cmd_vdsocc = $(CC_COMPAT) -Wp,-MD,$(depfile) $(VDSO_CFLAGS) -c -o $@ $<
 quiet_cmd_vdsocc_gettimeofday = CC32    $@
-      cmd_vdsocc_gettimeofday = $(COMPATCC) -Wp,-MD,$(depfile) $(VDSO_CFLAGS) $(VDSO_CFLAGS_gettimeofday_o) -c -o $@ $<
+      cmd_vdsocc_gettimeofday = $(CC_COMPAT) -Wp,-MD,$(depfile) $(VDSO_CFLAGS) $(VDSO_CFLAGS_gettimeofday_o) -c -o $@ $<
 quiet_cmd_vdsoas = AS32    $@
-      cmd_vdsoas = $(COMPATCC) -Wp,-MD,$(depfile) $(VDSO_AFLAGS) -c -o $@ $<
+      cmd_vdsoas = $(CC_COMPAT) -Wp,-MD,$(depfile) $(VDSO_AFLAGS) -c -o $@ $<
 
 quiet_cmd_vdsomunge = MUNGE   $@
       cmd_vdsomunge = $(obj)/$(munge) $< $@
index bd978ad71936dc7413c0d077f7d6c18cdc8f48ac..799e84a403351df81d2f38d1e92f6e4850b7b002 100644 (file)
@@ -124,6 +124,9 @@ static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu)
 {
        u64 hcr = vcpu->arch.hcr_el2;
 
+       if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM))
+               hcr |= HCR_TVM;
+
        write_sysreg(hcr, hcr_el2);
 
        if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN) && (hcr & HCR_VSE))
@@ -174,8 +177,10 @@ static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu)
         * the crucial bit is "On taking a vSError interrupt,
         * HCR_EL2.VSE is cleared to 0."
         */
-       if (vcpu->arch.hcr_el2 & HCR_VSE)
-               vcpu->arch.hcr_el2 = read_sysreg(hcr_el2);
+       if (vcpu->arch.hcr_el2 & HCR_VSE) {
+               vcpu->arch.hcr_el2 &= ~HCR_VSE;
+               vcpu->arch.hcr_el2 |= read_sysreg(hcr_el2) & HCR_VSE;
+       }
 
        if (has_vhe())
                deactivate_traps_vhe();
@@ -229,20 +234,6 @@ static void __hyp_text __hyp_vgic_restore_state(struct kvm_vcpu *vcpu)
        }
 }
 
-static bool __hyp_text __true_value(void)
-{
-       return true;
-}
-
-static bool __hyp_text __false_value(void)
-{
-       return false;
-}
-
-static hyp_alternate_select(__check_arm_834220,
-                           __false_value, __true_value,
-                           ARM64_WORKAROUND_834220);
-
 static bool __hyp_text __translate_far_to_hpfar(u64 far, u64 *hpfar)
 {
        u64 par, tmp;
@@ -298,7 +289,8 @@ static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu)
         * resolve the IPA using the AT instruction.
         */
        if (!(esr & ESR_ELx_S1PTW) &&
-           (__check_arm_834220()() || (esr & ESR_ELx_FSC_TYPE) == FSC_PERM)) {
+           (cpus_have_const_cap(ARM64_WORKAROUND_834220) ||
+            (esr & ESR_ELx_FSC_TYPE) == FSC_PERM)) {
                if (!__translate_far_to_hpfar(far, &hpfar))
                        return false;
        } else {
@@ -393,6 +385,61 @@ static bool __hyp_text __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
        return true;
 }
 
+static bool __hyp_text handle_tx2_tvm(struct kvm_vcpu *vcpu)
+{
+       u32 sysreg = esr_sys64_to_sysreg(kvm_vcpu_get_hsr(vcpu));
+       int rt = kvm_vcpu_sys_get_rt(vcpu);
+       u64 val = vcpu_get_reg(vcpu, rt);
+
+       /*
+        * The normal sysreg handling code expects to see the traps,
+        * let's not do anything here.
+        */
+       if (vcpu->arch.hcr_el2 & HCR_TVM)
+               return false;
+
+       switch (sysreg) {
+       case SYS_SCTLR_EL1:
+               write_sysreg_el1(val, SYS_SCTLR);
+               break;
+       case SYS_TTBR0_EL1:
+               write_sysreg_el1(val, SYS_TTBR0);
+               break;
+       case SYS_TTBR1_EL1:
+               write_sysreg_el1(val, SYS_TTBR1);
+               break;
+       case SYS_TCR_EL1:
+               write_sysreg_el1(val, SYS_TCR);
+               break;
+       case SYS_ESR_EL1:
+               write_sysreg_el1(val, SYS_ESR);
+               break;
+       case SYS_FAR_EL1:
+               write_sysreg_el1(val, SYS_FAR);
+               break;
+       case SYS_AFSR0_EL1:
+               write_sysreg_el1(val, SYS_AFSR0);
+               break;
+       case SYS_AFSR1_EL1:
+               write_sysreg_el1(val, SYS_AFSR1);
+               break;
+       case SYS_MAIR_EL1:
+               write_sysreg_el1(val, SYS_MAIR);
+               break;
+       case SYS_AMAIR_EL1:
+               write_sysreg_el1(val, SYS_AMAIR);
+               break;
+       case SYS_CONTEXTIDR_EL1:
+               write_sysreg_el1(val, SYS_CONTEXTIDR);
+               break;
+       default:
+               return false;
+       }
+
+       __kvm_skip_instr(vcpu);
+       return true;
+}
+
 /*
  * Return true when we were able to fixup the guest exit and should return to
  * the guest, false when we should restore the host state and return to the
@@ -412,6 +459,11 @@ static bool __hyp_text fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
        if (*exit_code != ARM_EXCEPTION_TRAP)
                goto exit;
 
+       if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM) &&
+           kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_SYS64 &&
+           handle_tx2_tvm(vcpu))
+               return true;
+
        /*
         * We trap the first access to the FP/SIMD to save the host context
         * and restore the guest context lazily.
index c466060b76d69d11630244be356a0d46fad08371..eb0efc5557f302c8e340fdbf60fae2c4ab0a5930 100644 (file)
@@ -67,10 +67,14 @@ static void __hyp_text __tlb_switch_to_guest_nvhe(struct kvm *kvm,
        isb();
 }
 
-static hyp_alternate_select(__tlb_switch_to_guest,
-                           __tlb_switch_to_guest_nvhe,
-                           __tlb_switch_to_guest_vhe,
-                           ARM64_HAS_VIRT_HOST_EXTN);
+static void __hyp_text __tlb_switch_to_guest(struct kvm *kvm,
+                                            struct tlb_inv_context *cxt)
+{
+       if (has_vhe())
+               __tlb_switch_to_guest_vhe(kvm, cxt);
+       else
+               __tlb_switch_to_guest_nvhe(kvm, cxt);
+}
 
 static void __hyp_text __tlb_switch_to_host_vhe(struct kvm *kvm,
                                                struct tlb_inv_context *cxt)
@@ -98,10 +102,14 @@ static void __hyp_text __tlb_switch_to_host_nvhe(struct kvm *kvm,
        write_sysreg(0, vttbr_el2);
 }
 
-static hyp_alternate_select(__tlb_switch_to_host,
-                           __tlb_switch_to_host_nvhe,
-                           __tlb_switch_to_host_vhe,
-                           ARM64_HAS_VIRT_HOST_EXTN);
+static void __hyp_text __tlb_switch_to_host(struct kvm *kvm,
+                                           struct tlb_inv_context *cxt)
+{
+       if (has_vhe())
+               __tlb_switch_to_host_vhe(kvm, cxt);
+       else
+               __tlb_switch_to_host_nvhe(kvm, cxt);
+}
 
 void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
 {
@@ -111,7 +119,7 @@ void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
 
        /* Switch to requested VMID */
        kvm = kern_hyp_va(kvm);
-       __tlb_switch_to_guest()(kvm, &cxt);
+       __tlb_switch_to_guest(kvm, &cxt);
 
        /*
         * We could do so much better if we had the VA as well.
@@ -154,7 +162,7 @@ void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
        if (!has_vhe() && icache_is_vpipt())
                __flush_icache_all();
 
-       __tlb_switch_to_host()(kvm, &cxt);
+       __tlb_switch_to_host(kvm, &cxt);
 }
 
 void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm)
@@ -165,13 +173,13 @@ void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm)
 
        /* Switch to requested VMID */
        kvm = kern_hyp_va(kvm);
-       __tlb_switch_to_guest()(kvm, &cxt);
+       __tlb_switch_to_guest(kvm, &cxt);
 
        __tlbi(vmalls12e1is);
        dsb(ish);
        isb();
 
-       __tlb_switch_to_host()(kvm, &cxt);
+       __tlb_switch_to_host(kvm, &cxt);
 }
 
 void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
@@ -180,13 +188,13 @@ void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
        struct tlb_inv_context cxt;
 
        /* Switch to requested VMID */
-       __tlb_switch_to_guest()(kvm, &cxt);
+       __tlb_switch_to_guest(kvm, &cxt);
 
        __tlbi(vmalle1);
        dsb(nsh);
        isb();
 
-       __tlb_switch_to_host()(kvm, &cxt);
+       __tlb_switch_to_host(kvm, &cxt);
 }
 
 void __hyp_text __kvm_flush_vm_context(void)
index 2071260a275bd236923da5650da86a012eaa5b4a..46822afc57e00461843b56f72cd403a939f17e69 100644 (file)
@@ -632,6 +632,8 @@ static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
         */
        val = ((pmcr & ~ARMV8_PMU_PMCR_MASK)
               | (ARMV8_PMU_PMCR_MASK & 0xdecafbad)) & (~ARMV8_PMU_PMCR_E);
+       if (!system_supports_32bit_el0())
+               val |= ARMV8_PMU_PMCR_LC;
        __vcpu_sys_reg(vcpu, r->reg) = val;
 }
 
@@ -682,6 +684,8 @@ static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
                val = __vcpu_sys_reg(vcpu, PMCR_EL0);
                val &= ~ARMV8_PMU_PMCR_MASK;
                val |= p->regval & ARMV8_PMU_PMCR_MASK;
+               if (!system_supports_32bit_el0())
+                       val |= ARMV8_PMU_PMCR_LC;
                __vcpu_sys_reg(vcpu, PMCR_EL0) = val;
                kvm_pmu_handle_pmcr(vcpu, val);
                kvm_vcpu_pmu_restore_guest(vcpu);
index 115d7a0e4b082679ed2a651c90d8393609f6d1c2..9fc6db0bcbad0636f2ee6919cc2a04d5f8bd4127 100644 (file)
@@ -113,6 +113,15 @@ static inline bool is_ttbr1_addr(unsigned long addr)
        return arch_kasan_reset_tag(addr) >= PAGE_OFFSET;
 }
 
+static inline unsigned long mm_to_pgd_phys(struct mm_struct *mm)
+{
+       /* Either init_pg_dir or swapper_pg_dir */
+       if (mm == &init_mm)
+               return __pa_symbol(mm->pgd);
+
+       return (unsigned long)virt_to_phys(mm->pgd);
+}
+
 /*
  * Dump out the page tables associated with 'addr' in the currently active mm.
  */
@@ -141,7 +150,7 @@ static void show_pte(unsigned long addr)
 
        pr_alert("%s pgtable: %luk pages, %llu-bit VAs, pgdp=%016lx\n",
                 mm == &init_mm ? "swapper" : "user", PAGE_SIZE / SZ_1K,
-                vabits_actual, (unsigned long)virt_to_phys(mm->pgd));
+                vabits_actual, mm_to_pgd_phys(mm));
        pgdp = pgd_offset(mm, addr);
        pgd = READ_ONCE(*pgdp);
        pr_alert("[%016lx] pgd=%016llx", addr, pgd_val(pgd));
@@ -259,14 +268,18 @@ static bool __kprobes is_spurious_el1_translation_fault(unsigned long addr,
        par = read_sysreg(par_el1);
        local_irq_restore(flags);
 
+       /*
+        * If we now have a valid translation, treat the translation fault as
+        * spurious.
+        */
        if (!(par & SYS_PAR_EL1_F))
-               return false;
+               return true;
 
        /*
         * If we got a different type of fault from the AT instruction,
         * treat the translation fault as spurious.
         */
-       dfsc = FIELD_PREP(SYS_PAR_EL1_FST, par);
+       dfsc = FIELD_GET(SYS_PAR_EL1_FST, par);
        return (dfsc & ESR_ELx_FSC_TYPE) != ESR_ELx_FSC_FAULT;
 }
 
index a4fc65f3928d434161cf1536c642b5060229f94f..b66215e8658e240e7969addf2d8707528b780510 100644 (file)
@@ -1,4 +1,3 @@
 # SPDX-License-Identifier: GPL-2.0-only
 xen-arm-y      += $(addprefix ../../arm/xen/, enlighten.o grant-table.o p2m.o mm.o)
 obj-y          := xen-arm.o hypercall.o
-obj-$(CONFIG_XEN_EFI) += $(addprefix ../../arm/xen/, efi.o)
index 77a836e661c9eea91bc7582dd6ed435f5aba8f90..df69eaa453a1c299b8467c3b972ef8a225fc5706 100644 (file)
@@ -84,7 +84,7 @@ void __init prom_init(void)
                 * Here we will start up CPU1 in the background and ask it to
                 * reconfigure itself then go back to sleep.
                 */
-               memcpy((void *)0xa0000200, &bmips_smp_movevec, 0x20);
+               memcpy((void *)0xa0000200, bmips_smp_movevec, 0x20);
                __sync();
                set_c0_cause(C_SW0);
                cpumask_set_cpu(1, &bmips_booted_mask);
index 63a9f33aa43e8de626e59cbf57fddf77528ef4f8..5cfc9d347826a0b3a2d77977d9cffa3726e38972 100644 (file)
@@ -99,7 +99,7 @@
 
                        miscintc: interrupt-controller@18060010 {
                                compatible = "qca,ar7240-misc-intc";
-                               reg = <0x18060010 0x4>;
+                               reg = <0x18060010 0x8>;
 
                                interrupt-parent = <&cpuintc>;
                                interrupts = <6>;
index 16bef819fe9897882778ca501ca48591afc2d1e9..914af125a7fa28845efcb889035261d7d4ecd783 100644 (file)
@@ -571,7 +571,6 @@ CONFIG_USB_SERIAL_OMNINET=m
 CONFIG_USB_EMI62=m
 CONFIG_USB_EMI26=m
 CONFIG_USB_ADUTUX=m
-CONFIG_USB_RIO500=m
 CONFIG_USB_LEGOTOWER=m
 CONFIG_USB_LCD=m
 CONFIG_USB_CYPRESS_CY7C63=m
index 8762e75f5d5f14dffe5dce6aeffb79da12bd6e7f..2c7adea7638f5de66fed440b231becfb875b55c0 100644 (file)
@@ -314,7 +314,6 @@ CONFIG_USB_SERIAL_SAFE_PADDED=y
 CONFIG_USB_SERIAL_CYBERJACK=m
 CONFIG_USB_SERIAL_XIRCOM=m
 CONFIG_USB_SERIAL_OMNINET=m
-CONFIG_USB_RIO500=m
 CONFIG_USB_LEGOTOWER=m
 CONFIG_USB_LCD=m
 CONFIG_USB_CYTHERM=m
index af44b35d79a1d95820354fe410e978e365552a52..b4328b3b5288e8a6a04aa75d31137a1f95aacfca 100644 (file)
@@ -160,7 +160,6 @@ void __init prom_meminit(void)
 
 void __init prom_free_prom_memory(void)
 {
-       unsigned long addr;
        int i;
 
        if (prom_flags & PROM_FLAG_DONT_FREE_TEMP)
index 8772617b64cefec0835523fe107a2febdaa778ab..80112f2298b68cce9ba80e994167eaec0c435098 100644 (file)
@@ -43,7 +43,7 @@
 
 /* O32 stack has to be 8-byte aligned. */
 static u64 o32_stk[4096];
-#define O32_STK          &o32_stk[sizeof(o32_stk)]
+#define O32_STK          (&o32_stk[ARRAY_SIZE(o32_stk)])
 
 #define __PROM_O32(fun, arg) fun arg __asm__(#fun); \
                                     __asm__(#fun " = call_o32")
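
The old definition indexed the array by sizeof(), which counts bytes rather than elements; for a u64 array that lands eight times past the end instead of at the intended one-past-the-end stack top. A minimal illustration of the difference, on a hypothetical array rather than the PROM code itself:

#include <linux/kernel.h>	/* ARRAY_SIZE() */
#include <linux/types.h>

static u64 stk[4096];

/* Wrong: sizeof(stk) == 4096 * 8 bytes, so this points far past the array. */
#define STK_TOP_WRONG	(&stk[sizeof(stk)])

/* Right: ARRAY_SIZE(stk) == 4096 elements, one past the last entry. */
#define STK_TOP		(&stk[ARRAY_SIZE(stk)])
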
index bf6a8afd7ad2783dff63cb55954a2e34d8ca6e79..581a6a3c66e405c6ea23cdfefc45690a2fe43209 100644 (file)
@@ -75,11 +75,11 @@ static inline int register_bmips_smp_ops(void)
 #endif
 }
 
-extern char bmips_reset_nmi_vec;
-extern char bmips_reset_nmi_vec_end;
-extern char bmips_smp_movevec;
-extern char bmips_smp_int_vec;
-extern char bmips_smp_int_vec_end;
+extern char bmips_reset_nmi_vec[];
+extern char bmips_reset_nmi_vec_end[];
+extern char bmips_smp_movevec[];
+extern char bmips_smp_int_vec[];
+extern char bmips_smp_int_vec_end[];
 
 extern int bmips_smp_enabled;
 extern int bmips_cpu_offset;
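
Declaring these assembly-defined labels as incomplete arrays, and dropping the explicit & at the call sites, is the usual idiom for symbols with no real C object behind them: it keeps the compiler (and FORTIFY_SOURCE's memcpy checks) from treating them as single one-byte objects when their addresses are used in size calculations. A generic sketch of the pattern, with hypothetical symbol names:

#include <linux/string.h>

extern char vec_start[];	/* label defined in assembly */
extern char vec_end[];

static void copy_vector(void *dst)
{
	/* The array form makes the pointer arithmetic legitimate to the compiler. */
	memcpy(dst, vec_start, vec_end - vec_start);
}
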
index 79bf34efbc04bb7061483f2e43d9fccb94263d68..f6136871561dc97187543573e49262ead31f9df8 100644 (file)
@@ -77,8 +77,8 @@ extern unsigned long __xchg_called_with_bad_pointer(void)
 extern unsigned long __xchg_small(volatile void *ptr, unsigned long val,
                                  unsigned int size);
 
-static inline unsigned long __xchg(volatile void *ptr, unsigned long x,
-                                  int size)
+static __always_inline
+unsigned long __xchg(volatile void *ptr, unsigned long x, int size)
 {
        switch (size) {
        case 1:
@@ -153,8 +153,9 @@ static inline unsigned long __xchg(volatile void *ptr, unsigned long x,
 extern unsigned long __cmpxchg_small(volatile void *ptr, unsigned long old,
                                     unsigned long new, unsigned int size);
 
-static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
-                                     unsigned long new, unsigned int size)
+static __always_inline
+unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
+                       unsigned long new, unsigned int size)
 {
        switch (size) {
        case 1:
index cbdc14b7743532fae1c2e5ee2d1144e4a97f36ba..adab7b54c3b421ee6efe4b11ad3a4bc53034f9f1 100644 (file)
@@ -36,6 +36,7 @@
 #include <asm/octeon/octeon-feature.h>
 
 #include <asm/octeon/cvmx-ipd-defs.h>
+#include <asm/octeon/cvmx-pip-defs.h>
 
 enum cvmx_ipd_mode {
    CVMX_IPD_OPC_MODE_STT = 0LL,          /* All blocks DRAM, not cached in L2 */
index 071053ece6772b7a8f00075f5ba0d1554ed9626c..5d70babfc9ee53a26c3ff3765a4cfa6bca6e9742 100644 (file)
@@ -52,6 +52,7 @@
 # endif
 #define __ARCH_WANT_SYS_FORK
 #define __ARCH_WANT_SYS_CLONE
+#define __ARCH_WANT_SYS_CLONE3
 
 /* whitelists for checksyscalls */
 #define __IGNORE_fadvise64_64
index e78462e8ca2e74311028c78b6be52a186fc83b9a..b08825531e9f9a3ce89e926ffc3e096fbfe14fc0 100644 (file)
@@ -24,6 +24,8 @@
 
 #define VDSO_HAS_CLOCK_GETRES          1
 
+#define __VDSO_USE_SYSCALL             ULLONG_MAX
+
 #ifdef CONFIG_MIPS_CLOCK_VSYSCALL
 
 static __always_inline long gettimeofday_fallback(
@@ -205,7 +207,7 @@ static __always_inline u64 __arch_get_hw_counter(s32 clock_mode)
                break;
 #endif
        default:
-               cycle_now = 0;
+               cycle_now = __VDSO_USE_SYSCALL;
                break;
        }
 
index a2aba4b059e63ff535ac3f46d35efa67ef258bf0..1ade1daa49210713c53bdd20af2564eeb628e3cd 100644 (file)
@@ -6,5 +6,16 @@
 #define HWCAP_MIPS_R6          (1 << 0)
 #define HWCAP_MIPS_MSA         (1 << 1)
 #define HWCAP_MIPS_CRC32       (1 << 2)
+#define HWCAP_MIPS_MIPS16      (1 << 3)
+#define HWCAP_MIPS_MDMX     (1 << 4)
+#define HWCAP_MIPS_MIPS3D   (1 << 5)
+#define HWCAP_MIPS_SMARTMIPS (1 << 6)
+#define HWCAP_MIPS_DSP      (1 << 7)
+#define HWCAP_MIPS_DSP2     (1 << 8)
+#define HWCAP_MIPS_DSP3     (1 << 9)
+#define HWCAP_MIPS_MIPS16E2 (1 << 10)
+#define HWCAP_LOONGSON_MMI  (1 << 11)
+#define HWCAP_LOONGSON_EXT  (1 << 12)
+#define HWCAP_LOONGSON_EXT2 (1 << 13)
 
 #endif /* _UAPI_ASM_HWCAP_H */
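
These bits are reported to userspace through the ELF auxiliary vector, so once the uapi header above is installed a program can probe the features with getauxval(). A small userspace sketch, assuming a MIPS target where this header is available:

#include <stdio.h>
#include <sys/auxv.h>
#include <asm/hwcap.h>		/* the uapi header patched above */

int main(void)
{
	unsigned long hwcap = getauxval(AT_HWCAP);

	if (hwcap & HWCAP_MIPS_MSA)
		printf("MSA available\n");
	if (hwcap & HWCAP_LOONGSON_MMI)
		printf("Loongson MMI available\n");
	return 0;
}
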
index fa62cd1dff9360220f0c2f354b7a31616bb9ec19..6a7afe7ef4d356c005a90e711df0603a18302969 100644 (file)
@@ -24,7 +24,8 @@ static char r4kwar[] __initdata =
 static char daddiwar[] __initdata =
        "Enable CPU_DADDI_WORKAROUNDS to rectify.";
 
-static inline void align_mod(const int align, const int mod)
+static __always_inline __init
+void align_mod(const int align, const int mod)
 {
        asm volatile(
                ".set   push\n\t"
@@ -38,8 +39,9 @@ static inline void align_mod(const int align, const int mod)
                : "n"(align), "n"(mod));
 }
 
-static __always_inline void mult_sh_align_mod(long *v1, long *v2, long *w,
-                                             const int align, const int mod)
+static __always_inline __init
+void mult_sh_align_mod(long *v1, long *v2, long *w,
+                      const int align, const int mod)
 {
        unsigned long flags;
        int m1, m2;
@@ -113,7 +115,7 @@ static __always_inline void mult_sh_align_mod(long *v1, long *v2, long *w,
        *w = lw;
 }
 
-static inline void check_mult_sh(void)
+static __always_inline __init void check_mult_sh(void)
 {
        long v1[8], v2[8], w[8];
        int bug, fix, i;
@@ -176,7 +178,7 @@ asmlinkage void __init do_daddi_ov(struct pt_regs *regs)
        exception_exit(prev_state);
 }
 
-static inline void check_daddi(void)
+static __init void check_daddi(void)
 {
        extern asmlinkage void handle_daddi_ov(void);
        unsigned long flags;
@@ -242,7 +244,7 @@ static inline void check_daddi(void)
 
 int daddiu_bug = IS_ENABLED(CONFIG_CPU_MIPSR6) ? 0 : -1;
 
-static inline void check_daddiu(void)
+static __init void check_daddiu(void)
 {
        long v, w, tmp;
 
index c2eb392597bf65a4320877f41839712e8a90b6aa..f521cbf934e769bb33f212e66edcafac9ee5aac0 100644 (file)
@@ -2180,6 +2180,39 @@ void cpu_probe(void)
                elf_hwcap |= HWCAP_MIPS_MSA;
        }
 
+       if (cpu_has_mips16)
+               elf_hwcap |= HWCAP_MIPS_MIPS16;
+
+       if (cpu_has_mdmx)
+               elf_hwcap |= HWCAP_MIPS_MDMX;
+
+       if (cpu_has_mips3d)
+               elf_hwcap |= HWCAP_MIPS_MIPS3D;
+
+       if (cpu_has_smartmips)
+               elf_hwcap |= HWCAP_MIPS_SMARTMIPS;
+
+       if (cpu_has_dsp)
+               elf_hwcap |= HWCAP_MIPS_DSP;
+
+       if (cpu_has_dsp2)
+               elf_hwcap |= HWCAP_MIPS_DSP2;
+
+       if (cpu_has_dsp3)
+               elf_hwcap |= HWCAP_MIPS_DSP3;
+
+       if (cpu_has_mips16e2)
+               elf_hwcap |= HWCAP_MIPS_MIPS16E2;
+
+       if (cpu_has_loongson_mmi)
+               elf_hwcap |= HWCAP_LOONGSON_MMI;
+
+       if (cpu_has_loongson_ext)
+               elf_hwcap |= HWCAP_LOONGSON_EXT;
+
+       if (cpu_has_loongson_ext2)
+               elf_hwcap |= HWCAP_LOONGSON_EXT2;
+
        if (cpu_has_vz)
                cpu_probe_vz(c);
 
index b8249c233754b8399a4ca55b3c8b9ebc1f60ab3d..5eec13b8d222d3940dba7bbf022f52dbfc43fb5b 100644 (file)
@@ -108,6 +108,9 @@ void __init add_memory_region(phys_addr_t start, phys_addr_t size, long type)
                return;
        }
 
+       if (start < PHYS_OFFSET)
+               return;
+
        memblock_add(start, size);
        /* Reserve any memory except the ordinary RAM ranges. */
        switch (type) {
@@ -321,7 +324,7 @@ static void __init bootmem_init(void)
         * Reserve any memory between the start of RAM and PHYS_OFFSET
         */
        if (ramstart > PHYS_OFFSET)
-               memblock_reserve(PHYS_OFFSET, PFN_UP(ramstart) - PHYS_OFFSET);
+               memblock_reserve(PHYS_OFFSET, ramstart - PHYS_OFFSET);
 
        if (PFN_UP(ramstart) > ARCH_PFN_OFFSET) {
                pr_info("Wasting %lu bytes for tracking %lu unused pages\n",
index 76fae9b79f1312c1997e399f49d940494bd8b689..712c15de6ab9faa9d856b875963432d40ef610a1 100644 (file)
@@ -464,10 +464,10 @@ static void bmips_wr_vec(unsigned long dst, char *start, char *end)
 
 static inline void bmips_nmi_handler_setup(void)
 {
-       bmips_wr_vec(BMIPS_NMI_RESET_VEC, &bmips_reset_nmi_vec,
-               &bmips_reset_nmi_vec_end);
-       bmips_wr_vec(BMIPS_WARM_RESTART_VEC, &bmips_smp_int_vec,
-               &bmips_smp_int_vec_end);
+       bmips_wr_vec(BMIPS_NMI_RESET_VEC, bmips_reset_nmi_vec,
+               bmips_reset_nmi_vec_end);
+       bmips_wr_vec(BMIPS_WARM_RESTART_VEC, bmips_smp_int_vec,
+               bmips_smp_int_vec_end);
 }
 
 struct reset_vec_info {
index b0e25e913bdb956531c4609c762672b8d113a632..3f16f382303103b8ead5374c8c75319680c146bd 100644 (file)
@@ -80,6 +80,7 @@ SYSCALL_DEFINE6(mips_mmap2, unsigned long, addr, unsigned long, len,
 
 save_static_function(sys_fork);
 save_static_function(sys_clone);
+save_static_function(sys_clone3);
 
 SYSCALL_DEFINE1(set_thread_area, unsigned long, addr)
 {
index c9c879ec9b6d6455e08a38d9b734d82b757986ae..e7c5ab38e403c0dee1321134847d13c5855f32a8 100644 (file)
 432    n32     fsmount                         sys_fsmount
 433    n32     fspick                          sys_fspick
 434    n32     pidfd_open                      sys_pidfd_open
-# 435 reserved for clone3
+435    n32     clone3                          __sys_clone3
index bbce9159caa122ebaa762f1115459c22488c425d..13cd66581f3bed758b312165c28a4a75dc812942 100644 (file)
 432    n64     fsmount                         sys_fsmount
 433    n64     fspick                          sys_fspick
 434    n64     pidfd_open                      sys_pidfd_open
-# 435 reserved for clone3
+435    n64     clone3                          __sys_clone3
index 9653591428ecd419a66238d54051262821736d2b..353539ea4140a410fc258650a08544da05f9cd23 100644 (file)
 432    o32     fsmount                         sys_fsmount
 433    o32     fspick                          sys_fspick
 434    o32     pidfd_open                      sys_pidfd_open
-# 435 reserved for clone3
+435    o32     clone3                          __sys_clone3
index c1a4d4dc46655fe914c1927cb8088969e5909318..9f79908f5063e882c1ef01a927551484c8738fa4 100644 (file)
@@ -66,6 +66,10 @@ else
       $(call cc-option,-march=mips64r2,-mips64r2 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS64)
 endif
 
+# Some -march= flags enable MMI instructions, and GCC complains about that
+# support being enabled alongside -msoft-float. Thus explicitly disable MMI.
+cflags-y += $(call cc-option,-mno-loongson-mmi)
+
 #
 # Loongson Machines' Support
 #
index 4abb92e0fc3960907f47f7d1b33360fff97bee77..4254ac4ec616fbab44a92d060fa455524ee61918 100644 (file)
@@ -3,6 +3,7 @@
  */
 #include <linux/fs.h>
 #include <linux/fcntl.h>
+#include <linux/memblock.h>
 #include <linux/mm.h>
 
 #include <asm/bootinfo.h>
@@ -64,24 +65,22 @@ void __init prom_init_memory(void)
                node_id = loongson_memmap->map[i].node_id;
                mem_type = loongson_memmap->map[i].mem_type;
 
-               if (node_id == 0) {
-                       switch (mem_type) {
-                       case SYSTEM_RAM_LOW:
-                               add_memory_region(loongson_memmap->map[i].mem_start,
-                                       (u64)loongson_memmap->map[i].mem_size << 20,
-                                       BOOT_MEM_RAM);
-                               break;
-                       case SYSTEM_RAM_HIGH:
-                               add_memory_region(loongson_memmap->map[i].mem_start,
-                                       (u64)loongson_memmap->map[i].mem_size << 20,
-                                       BOOT_MEM_RAM);
-                               break;
-                       case SYSTEM_RAM_RESERVED:
-                               add_memory_region(loongson_memmap->map[i].mem_start,
-                                       (u64)loongson_memmap->map[i].mem_size << 20,
-                                       BOOT_MEM_RESERVED);
-                               break;
-                       }
+               if (node_id != 0)
+                       continue;
+
+               switch (mem_type) {
+               case SYSTEM_RAM_LOW:
+                       memblock_add(loongson_memmap->map[i].mem_start,
+                               (u64)loongson_memmap->map[i].mem_size << 20);
+                       break;
+               case SYSTEM_RAM_HIGH:
+                       memblock_add(loongson_memmap->map[i].mem_start,
+                               (u64)loongson_memmap->map[i].mem_size << 20);
+                       break;
+               case SYSTEM_RAM_RESERVED:
+                       memblock_reserve(loongson_memmap->map[i].mem_start,
+                               (u64)loongson_memmap->map[i].mem_size << 20);
+                       break;
                }
        }
 }
index ffefc1cb26121e7b653b64b661b267cac4b4e1cd..98c3a7feb10f8b2391c968661e5e76c8d6c2770e 100644 (file)
@@ -110,7 +110,7 @@ static int __init serial_init(void)
 }
 module_init(serial_init);
 
-static void __init serial_exit(void)
+static void __exit serial_exit(void)
 {
        platform_device_unregister(&uart8250_device);
 }
index 414e97de5dc090524989f7aae5e1135619b31eb5..8f20d2cb376728162466e3e2e5249eaf48528486 100644 (file)
@@ -142,8 +142,6 @@ static void __init szmem(unsigned int node)
                                (u32)node_id, mem_type, mem_start, mem_size);
                        pr_info("       start_pfn:0x%llx, end_pfn:0x%llx, num_physpages:0x%lx\n",
                                start_pfn, end_pfn, num_physpages);
-                       add_memory_region((node_id << 44) + mem_start,
-                               (u64)mem_size << 20, BOOT_MEM_RAM);
                        memblock_add_node(PFN_PHYS(start_pfn),
                                PFN_PHYS(end_pfn - start_pfn), node);
                        break;
@@ -156,16 +154,12 @@ static void __init szmem(unsigned int node)
                                (u32)node_id, mem_type, mem_start, mem_size);
                        pr_info("       start_pfn:0x%llx, end_pfn:0x%llx, num_physpages:0x%lx\n",
                                start_pfn, end_pfn, num_physpages);
-                       add_memory_region((node_id << 44) + mem_start,
-                               (u64)mem_size << 20, BOOT_MEM_RAM);
                        memblock_add_node(PFN_PHYS(start_pfn),
                                PFN_PHYS(end_pfn - start_pfn), node);
                        break;
                case SYSTEM_RAM_RESERVED:
                        pr_info("Node%d: mem_type:%d, mem_start:0x%llx, mem_size:0x%llx MB\n",
                                (u32)node_id, mem_type, mem_start, mem_size);
-                       add_memory_region((node_id << 44) + mem_start,
-                               (u64)mem_size << 20, BOOT_MEM_RESERVED);
                        memblock_reserve(((node_id << 44) + mem_start),
                                mem_size << 20);
                        break;
@@ -191,8 +185,6 @@ static void __init node_mem_init(unsigned int node)
        NODE_DATA(node)->node_start_pfn = start_pfn;
        NODE_DATA(node)->node_spanned_pages = end_pfn - start_pfn;
 
-       free_bootmem_with_active_regions(node, end_pfn);
-
        if (node == 0) {
                /* kernel end address */
                unsigned long kernel_end_pfn = PFN_UP(__pa_symbol(&_end));
@@ -209,8 +201,6 @@ static void __init node_mem_init(unsigned int node)
                        memblock_reserve((node_addrspace_offset | 0xfe000000),
                                         32 << 20);
        }
-
-       sparse_memory_present_with_active_regions(node);
 }
 
 static __init void prom_meminit(void)
@@ -227,6 +217,7 @@ static __init void prom_meminit(void)
                        cpumask_clear(&__node_data[(node)]->cpumask);
                }
        }
+       memblocks_present();
        max_low_pfn = PHYS_PFN(memblock_end_of_DRAM());
 
        for (cpu = 0; cpu < loongson_sysconf.nr_cpus; cpu++) {
index e01cb33bfa1a78a67638d5b27fb8fc7e43df3e09..41bb91f056885f5589e5b2a278cdcec2d83be659 100644 (file)
@@ -653,6 +653,13 @@ static void build_restore_pagemask(u32 **p, struct uasm_reloc **r,
                                   int restore_scratch)
 {
        if (restore_scratch) {
+               /*
+                * Ensure the MFC0 below observes the value written to the
+                * KScratch register by the prior MTC0.
+                */
+               if (scratch_reg >= 0)
+                       uasm_i_ehb(p);
+
                /* Reset default page size */
                if (PM_DEFAULT_MASK >> 16) {
                        uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16);
@@ -667,12 +674,10 @@ static void build_restore_pagemask(u32 **p, struct uasm_reloc **r,
                        uasm_i_mtc0(p, 0, C0_PAGEMASK);
                        uasm_il_b(p, r, lid);
                }
-               if (scratch_reg >= 0) {
-                       uasm_i_ehb(p);
+               if (scratch_reg >= 0)
                        UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg);
-               } else {
+               else
                        UASM_i_LW(p, 1, scratchpad_offset(0), 0);
-               }
        } else {
                /* Reset default page size */
                if (PM_DEFAULT_MASK >> 16) {
@@ -921,6 +926,10 @@ build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
        }
        if (mode != not_refill && check_for_high_segbits) {
                uasm_l_large_segbits_fault(l, *p);
+
+               if (mode == refill_scratch && scratch_reg >= 0)
+                       uasm_i_ehb(p);
+
                /*
                 * We get here if we are an xsseg address, or if we are
                 * an xuseg address above (PGDIR_SHIFT+PGDIR_BITS) boundary.
@@ -939,12 +948,10 @@ build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
                uasm_i_jr(p, ptr);
 
                if (mode == refill_scratch) {
-                       if (scratch_reg >= 0) {
-                               uasm_i_ehb(p);
+                       if (scratch_reg >= 0)
                                UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg);
-                       } else {
+                       else
                                UASM_i_LW(p, 1, scratchpad_offset(0), 0);
-                       }
                } else {
                        uasm_i_nop(p);
                }
index dfb527961a27c3a813f7a6971243eb5f7f455548..800a21b8b8b044e779f8bae3630c69f3695c3071 100644 (file)
@@ -61,6 +61,7 @@ int init_debug = 1;
 /* memory blocks */
 struct prom_pmemblock mdesc[PROM_MAX_PMEMBLOCKS];
 
+#define MAX_PROM_MEM 5
 static phys_addr_t prom_mem_base[MAX_PROM_MEM] __initdata;
 static phys_addr_t prom_mem_size[MAX_PROM_MEM] __initdata;
 static unsigned int nr_prom_mem __initdata;
@@ -358,7 +359,7 @@ void __init prom_meminit(void)
                p++;
 
                if (type == BOOT_MEM_ROM_DATA) {
-                       if (nr_prom_mem >= 5) {
+                       if (nr_prom_mem >= MAX_PROM_MEM) {
                                pr_err("Too many ROM DATA regions");
                                continue;
                        }
@@ -377,7 +378,6 @@ void __init prom_free_prom_memory(void)
        char    *ptr;
        int     len = 0;
        int     i;
-       unsigned long addr;
 
        /*
         * preserve environment variables and command line from pmon/bbload
index 69cfa0a5339e437f4739f88b82010e1eb8a7369f..996a934ece7d66319a6048ceff22a8e31209e1cc 100644 (file)
@@ -15,6 +15,7 @@ ccflags-vdso := \
        $(filter -mmicromips,$(KBUILD_CFLAGS)) \
        $(filter -march=%,$(KBUILD_CFLAGS)) \
        $(filter -m%-float,$(KBUILD_CFLAGS)) \
+       $(filter -mno-loongson-%,$(KBUILD_CFLAGS)) \
        -D__VDSO__
 
 ifdef CONFIG_CC_IS_CLANG
@@ -59,7 +60,7 @@ CFLAGS_REMOVE_vgettimeofday.o = -pg
 ifndef CONFIG_CPU_MIPSR6
   ifeq ($(call ld-ifversion, -lt, 225000000, y),y)
     $(warning MIPS VDSO requires binutils >= 2.25)
-    obj-vdso-y := $(filter-out gettimeofday.o, $(obj-vdso-y))
+    obj-vdso-y := $(filter-out vgettimeofday.o, $(obj-vdso-y))
     ccflags-vdso += -DDISABLE_MIPS_VDSO
   endif
 endif
diff --git a/arch/mips/vdso/gettimeofday.c b/arch/mips/vdso/gettimeofday.c
deleted file mode 100644 (file)
index e8243c7..0000000
+++ /dev/null
@@ -1,269 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (C) 2015 Imagination Technologies
- * Author: Alex Smith <alex.smith@imgtec.com>
- */
-
-#include "vdso.h"
-
-#include <linux/compiler.h>
-#include <linux/time.h>
-
-#include <asm/clocksource.h>
-#include <asm/io.h>
-#include <asm/unistd.h>
-#include <asm/vdso.h>
-
-#ifdef CONFIG_MIPS_CLOCK_VSYSCALL
-
-static __always_inline long gettimeofday_fallback(struct timeval *_tv,
-                                         struct timezone *_tz)
-{
-       register struct timezone *tz asm("a1") = _tz;
-       register struct timeval *tv asm("a0") = _tv;
-       register long ret asm("v0");
-       register long nr asm("v0") = __NR_gettimeofday;
-       register long error asm("a3");
-
-       asm volatile(
-       "       syscall\n"
-       : "=r" (ret), "=r" (error)
-       : "r" (tv), "r" (tz), "r" (nr)
-       : "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13",
-         "$14", "$15", "$24", "$25", "hi", "lo", "memory");
-
-       return error ? -ret : ret;
-}
-
-#endif
-
-static __always_inline long clock_gettime_fallback(clockid_t _clkid,
-                                          struct timespec *_ts)
-{
-       register struct timespec *ts asm("a1") = _ts;
-       register clockid_t clkid asm("a0") = _clkid;
-       register long ret asm("v0");
-       register long nr asm("v0") = __NR_clock_gettime;
-       register long error asm("a3");
-
-       asm volatile(
-       "       syscall\n"
-       : "=r" (ret), "=r" (error)
-       : "r" (clkid), "r" (ts), "r" (nr)
-       : "$1", "$3", "$8", "$9", "$10", "$11", "$12", "$13",
-         "$14", "$15", "$24", "$25", "hi", "lo", "memory");
-
-       return error ? -ret : ret;
-}
-
-static __always_inline int do_realtime_coarse(struct timespec *ts,
-                                             const union mips_vdso_data *data)
-{
-       u32 start_seq;
-
-       do {
-               start_seq = vdso_data_read_begin(data);
-
-               ts->tv_sec = data->xtime_sec;
-               ts->tv_nsec = data->xtime_nsec >> data->cs_shift;
-       } while (vdso_data_read_retry(data, start_seq));
-
-       return 0;
-}
-
-static __always_inline int do_monotonic_coarse(struct timespec *ts,
-                                              const union mips_vdso_data *data)
-{
-       u32 start_seq;
-       u64 to_mono_sec;
-       u64 to_mono_nsec;
-
-       do {
-               start_seq = vdso_data_read_begin(data);
-
-               ts->tv_sec = data->xtime_sec;
-               ts->tv_nsec = data->xtime_nsec >> data->cs_shift;
-
-               to_mono_sec = data->wall_to_mono_sec;
-               to_mono_nsec = data->wall_to_mono_nsec;
-       } while (vdso_data_read_retry(data, start_seq));
-
-       ts->tv_sec += to_mono_sec;
-       timespec_add_ns(ts, to_mono_nsec);
-
-       return 0;
-}
-
-#ifdef CONFIG_CSRC_R4K
-
-static __always_inline u64 read_r4k_count(void)
-{
-       unsigned int count;
-
-       __asm__ __volatile__(
-       "       .set push\n"
-       "       .set mips32r2\n"
-       "       rdhwr   %0, $2\n"
-       "       .set pop\n"
-       : "=r" (count));
-
-       return count;
-}
-
-#endif
-
-#ifdef CONFIG_CLKSRC_MIPS_GIC
-
-static __always_inline u64 read_gic_count(const union mips_vdso_data *data)
-{
-       void __iomem *gic = get_gic(data);
-       u32 hi, hi2, lo;
-
-       do {
-               hi = __raw_readl(gic + sizeof(lo));
-               lo = __raw_readl(gic);
-               hi2 = __raw_readl(gic + sizeof(lo));
-       } while (hi2 != hi);
-
-       return (((u64)hi) << 32) + lo;
-}
-
-#endif
-
-static __always_inline u64 get_ns(const union mips_vdso_data *data)
-{
-       u64 cycle_now, delta, nsec;
-
-       switch (data->clock_mode) {
-#ifdef CONFIG_CSRC_R4K
-       case VDSO_CLOCK_R4K:
-               cycle_now = read_r4k_count();
-               break;
-#endif
-#ifdef CONFIG_CLKSRC_MIPS_GIC
-       case VDSO_CLOCK_GIC:
-               cycle_now = read_gic_count(data);
-               break;
-#endif
-       default:
-               return 0;
-       }
-
-       delta = (cycle_now - data->cs_cycle_last) & data->cs_mask;
-
-       nsec = (delta * data->cs_mult) + data->xtime_nsec;
-       nsec >>= data->cs_shift;
-
-       return nsec;
-}
-
-static __always_inline int do_realtime(struct timespec *ts,
-                                      const union mips_vdso_data *data)
-{
-       u32 start_seq;
-       u64 ns;
-
-       do {
-               start_seq = vdso_data_read_begin(data);
-
-               if (data->clock_mode == VDSO_CLOCK_NONE)
-                       return -ENOSYS;
-
-               ts->tv_sec = data->xtime_sec;
-               ns = get_ns(data);
-       } while (vdso_data_read_retry(data, start_seq));
-
-       ts->tv_nsec = 0;
-       timespec_add_ns(ts, ns);
-
-       return 0;
-}
-
-static __always_inline int do_monotonic(struct timespec *ts,
-                                       const union mips_vdso_data *data)
-{
-       u32 start_seq;
-       u64 ns;
-       u64 to_mono_sec;
-       u64 to_mono_nsec;
-
-       do {
-               start_seq = vdso_data_read_begin(data);
-
-               if (data->clock_mode == VDSO_CLOCK_NONE)
-                       return -ENOSYS;
-
-               ts->tv_sec = data->xtime_sec;
-               ns = get_ns(data);
-
-               to_mono_sec = data->wall_to_mono_sec;
-               to_mono_nsec = data->wall_to_mono_nsec;
-       } while (vdso_data_read_retry(data, start_seq));
-
-       ts->tv_sec += to_mono_sec;
-       ts->tv_nsec = 0;
-       timespec_add_ns(ts, ns + to_mono_nsec);
-
-       return 0;
-}
-
-#ifdef CONFIG_MIPS_CLOCK_VSYSCALL
-
-/*
- * This is behind the ifdef so that we don't provide the symbol when there's no
- * possibility of there being a usable clocksource, because there's nothing we
- * can do without it. When libc fails the symbol lookup it should fall back on
- * the standard syscall path.
- */
-int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
-{
-       const union mips_vdso_data *data = get_vdso_data();
-       struct timespec ts;
-       int ret;
-
-       ret = do_realtime(&ts, data);
-       if (ret)
-               return gettimeofday_fallback(tv, tz);
-
-       if (tv) {
-               tv->tv_sec = ts.tv_sec;
-               tv->tv_usec = ts.tv_nsec / 1000;
-       }
-
-       if (tz) {
-               tz->tz_minuteswest = data->tz_minuteswest;
-               tz->tz_dsttime = data->tz_dsttime;
-       }
-
-       return 0;
-}
-
-#endif /* CONFIG_MIPS_CLOCK_VSYSCALL */
-
-int __vdso_clock_gettime(clockid_t clkid, struct timespec *ts)
-{
-       const union mips_vdso_data *data = get_vdso_data();
-       int ret = -1;
-
-       switch (clkid) {
-       case CLOCK_REALTIME_COARSE:
-               ret = do_realtime_coarse(ts, data);
-               break;
-       case CLOCK_MONOTONIC_COARSE:
-               ret = do_monotonic_coarse(ts, data);
-               break;
-       case CLOCK_REALTIME:
-               ret = do_realtime(ts, data);
-               break;
-       case CLOCK_MONOTONIC:
-               ret = do_monotonic(ts, data);
-               break;
-       default:
-               break;
-       }
-
-       if (ret)
-               ret = clock_gettime_fallback(clkid, ts);
-
-       return ret;
-}
index 73ca89a47f492702f5326525cd67d85be9f8076e..e5de3f89763397109028c242ef700e1f9683ed6d 100644 (file)
@@ -22,7 +22,7 @@
 
 #define ARCH_DMA_MINALIGN      L1_CACHE_BYTES
 
-#define __read_mostly __attribute__((__section__(".data..read_mostly")))
+#define __read_mostly __section(.data..read_mostly)
 
 void parisc_cache_init(void);  /* initializes cache-flushing */
 void disable_sr_hashing_asm(int); /* low level support for above */
index 3eb4bfc1fb365478f11c9c87bfb3b3124fab567a..e080143e79a30576f4899e5b97a35f16a16b890b 100644 (file)
@@ -52,7 +52,7 @@
 })
 
 #ifdef CONFIG_SMP
-# define __lock_aligned __attribute__((__section__(".data..lock_aligned")))
+# define __lock_aligned __section(.data..lock_aligned)
 #endif
 
 #endif /* __PARISC_LDCW_H */
index 1d1d748c227f075685f79dc327bfc52a029885d1..b96d7449697791260fdf2a28985ba7174f412668 100644 (file)
@@ -2125,7 +2125,7 @@ ftrace_regs_caller:
        copy    %rp, %r26
        LDREG   -FTRACE_FRAME_SIZE-PT_SZ_ALGN(%sp), %r25
        ldo     -8(%r25), %r25
-       copy    %r3, %arg2
+       ldo     -FTRACE_FRAME_SIZE(%r1), %arg2
        b,l     ftrace_function_trampoline, %rp
        copy    %r1, %arg3 /* struct pt_regs */
 
index 92a9b5f12f98adfe4e87e6a18ff381e528108946..f29f682352f017fd2c55dd0ba34df9ac82a8c0c5 100644 (file)
@@ -3,7 +3,7 @@
  * arch/parisc/mm/ioremap.c
  *
  * (C) Copyright 1995 1996 Linus Torvalds
- * (C) Copyright 2001-2006 Helge Deller <deller@gmx.de>
+ * (C) Copyright 2001-2019 Helge Deller <deller@gmx.de>
  * (C) Copyright 2005 Kyle McMartin <kyle@parisc-linux.org>
  */
 
@@ -84,7 +84,7 @@ void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned l
        addr = (void __iomem *) area->addr;
        if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size,
                               phys_addr, pgprot)) {
-               vfree(addr);
+               vunmap(addr);
                return NULL;
        }
 
@@ -92,9 +92,11 @@ void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned l
 }
 EXPORT_SYMBOL(__ioremap);
 
-void iounmap(const volatile void __iomem *addr)
+void iounmap(const volatile void __iomem *io_addr)
 {
-       if (addr > high_memory)
-               return vfree((void *) (PAGE_MASK & (unsigned long __force) addr));
+       unsigned long addr = (unsigned long)io_addr & PAGE_MASK;
+
+       if (is_vmalloc_addr((void *)addr))
+               vunmap((void *)addr);
 }
 EXPORT_SYMBOL(iounmap);
index 6841bd52738bd3a882d452b456c6cae2cfc4fe8a..dfbd7f22eef5edb23c957a5a69c9151e625a5980 100644 (file)
@@ -50,7 +50,7 @@ endif
 
 BOOTAFLAGS     := -D__ASSEMBLY__ $(BOOTCFLAGS) -nostdinc
 
-BOOTARFLAGS    := -cr$(KBUILD_ARFLAGS)
+BOOTARFLAGS    := -crD
 
 ifdef CONFIG_CC_IS_CLANG
 BOOTCFLAGS += $(CLANG_FLAGS)
index 677e9babef801305e61951244f0b32298c1725c7..f9dc597b0b86884dca4dc9c5e1814312bb34e41e 100644 (file)
@@ -91,6 +91,7 @@
 
 static inline void kuap_update_sr(u32 sr, u32 addr, u32 end)
 {
+       addr &= 0xf0000000;     /* align addr to start of segment */
        barrier();      /* make sure thread.kuap is updated before playing with SRs */
        while (addr < end) {
                mtsrin(sr, addr);
index 4ce795d30377388601ffaa8e3819cac3d3e03f4c..ca8db193ae38a5d6967ba4621f63d15e8ce80317 100644 (file)
@@ -35,6 +35,10 @@ static inline void radix__flush_all_lpid(unsigned int lpid)
 {
        WARN_ON(1);
 }
+static inline void radix__flush_all_lpid_guest(unsigned int lpid)
+{
+       WARN_ON(1);
+}
 #endif
 
 extern void radix__flush_hugetlb_tlb_range(struct vm_area_struct *vma,
index 409c9bfb43d9dff09e4413f1095e03eaf184f2b1..57c229a86f0811e783c9526283e617f96c485bbf 100644 (file)
@@ -175,4 +175,7 @@ do {                                                                        \
        ARCH_DLINFO_CACHE_GEOMETRY;                                     \
 } while (0)
 
+/* Relocate the kernel image to @final_address */
+void relocate(unsigned long final_address);
+
 #endif /* _ASM_POWERPC_ELF_H */
index a4e7762dd28648ac5dc8e64bc96a6854b532087c..100f1b57ec2fd18531dd16b16c0885c0550f114c 100644 (file)
@@ -3249,7 +3249,20 @@ static void setup_secure_guest(unsigned long kbase, unsigned long fdt)
        /* Switch to secure mode. */
        prom_printf("Switching to secure mode.\n");
 
+       /*
+        * The ultravisor will do an integrity check of the kernel image but we
+        * relocated it so the check will fail. Restore the original image by
+        * relocating it back to the kernel virtual base address.
+        */
+       if (IS_ENABLED(CONFIG_RELOCATABLE))
+               relocate(KERNELBASE);
+
        ret = enter_secure_mode(kbase, fdt);
+
+       /* Relocate the kernel again. */
+       if (IS_ENABLED(CONFIG_RELOCATABLE))
+               relocate(kbase);
+
        if (ret != U_SUCCESS) {
                prom_printf("Returned %d from switching to secure mode.\n", ret);
                prom_rtas_os_term("Switch to secure mode failed.\n");
index 78bab17b1396aefe35ac0a4573fbbbab39acd148..b183ab9c5107c9236930b3d3d9e1f2c7dd27b302 100644 (file)
@@ -26,7 +26,8 @@ _end enter_prom $MEM_FUNCS reloc_offset __secondary_hold
 __secondary_hold_acknowledge __secondary_hold_spinloop __start
 logo_linux_clut224 btext_prepare_BAT
 reloc_got2 kernstart_addr memstart_addr linux_banner _stext
-__prom_init_toc_start __prom_init_toc_end btext_setup_display TOC."
+__prom_init_toc_start __prom_init_toc_end btext_setup_display TOC.
+relocate"
 
 NM="$1"
 OBJ="$2"
index d7fcdfa7fee40833a2b3d8fb586d6fa13fa34b3e..ec2547cc5ecbe2096d94df0982a00f324940ff6c 100644 (file)
@@ -36,8 +36,8 @@
 #include "book3s.h"
 #include "trace.h"
 
-#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
-#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
+#define VM_STAT(x, ...) offsetof(struct kvm, stat.x), KVM_STAT_VM, ## __VA_ARGS__
+#define VCPU_STAT(x, ...) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU, ## __VA_ARGS__
 
 /* #define EXIT_DEBUG */
 
@@ -69,8 +69,8 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "pthru_all",       VCPU_STAT(pthru_all) },
        { "pthru_host",      VCPU_STAT(pthru_host) },
        { "pthru_bad_aff",   VCPU_STAT(pthru_bad_aff) },
-       { "largepages_2M",    VM_STAT(num_2M_pages) },
-       { "largepages_1G",    VM_STAT(num_1G_pages) },
+       { "largepages_2M",    VM_STAT(num_2M_pages, .mode = 0444) },
+       { "largepages_1G",    VM_STAT(num_1G_pages, .mode = 0444) },
        { NULL }
 };
 
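The two-line change to largepages_2M/largepages_1G works because VM_STAT() and VCPU_STAT() now forward any extra designated initializers (here .mode = 0444) into the debugfs table entry, relying on ", ## __VA_ARGS__" comma pasting to stay compatible with callers that pass nothing extra. A self-contained sketch of that macro trick (invented struct and field names, not the KVM definitions) might be:

	#include <stdio.h>
	#include <stddef.h>

	struct stats { long faults; long exits; };

	struct table_entry {
		const char *name;
		size_t offset;
		unsigned int mode;	/* 0 here means "use the default" */
	};

	/* Extra arguments become designated initializers of the entry;
	 * ", ## __VA_ARGS__" (a GNU/Clang extension the kernel relies on)
	 * swallows the comma when no extra arguments are given. */
	#define STAT(x, ...) { #x, offsetof(struct stats, x), ## __VA_ARGS__ }

	static const struct table_entry entries[] = {
		STAT(faults),			/* mode stays 0 */
		STAT(exits, .mode = 0444),	/* read-only entry */
	};

	int main(void)
	{
		for (size_t i = 0; i < sizeof(entries) / sizeof(entries[0]); i++)
			printf("%-8s offset=%zu mode=%o\n",
			       entries[i].name, entries[i].offset, entries[i].mode);
		return 0;
	}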
index 74a9cfe84aeedfb945628aa31ec0bae0348e7eec..faebcbb8c4db39351dae28b2d4f44fde60b3345a 100644 (file)
@@ -1921,6 +1921,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
        mtspr   SPRN_PCR, r6
 18:
        /* Signal secondary CPUs to continue */
+       li      r0, 0
        stb     r0,VCORE_IN_GUEST(r5)
 19:    lis     r8,0x7fff               /* MAX_INT@h */
        mtspr   SPRN_HDEC,r8
index 591bfb4bfd0fabb74cb2c81c35ff51acef8f58ba..a3f9c665bb5ba4c68039471c9e3b257cfd7814d3 100644 (file)
@@ -1217,6 +1217,7 @@ int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
        struct kvmppc_xive *xive = dev->private;
        struct kvmppc_xive_vcpu *xc;
        int i, r = -EBUSY;
+       u32 vp_id;
 
        pr_devel("connect_vcpu(cpu=%d)\n", cpu);
 
@@ -1228,25 +1229,32 @@ int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
                return -EPERM;
        if (vcpu->arch.irq_type != KVMPPC_IRQ_DEFAULT)
                return -EBUSY;
-       if (kvmppc_xive_find_server(vcpu->kvm, cpu)) {
-               pr_devel("Duplicate !\n");
-               return -EEXIST;
-       }
        if (cpu >= (KVM_MAX_VCPUS * vcpu->kvm->arch.emul_smt_mode)) {
                pr_devel("Out of bounds !\n");
                return -EINVAL;
        }
-       xc = kzalloc(sizeof(*xc), GFP_KERNEL);
-       if (!xc)
-               return -ENOMEM;
 
        /* We need to synchronize with queue provisioning */
        mutex_lock(&xive->lock);
+
+       vp_id = kvmppc_xive_vp(xive, cpu);
+       if (kvmppc_xive_vp_in_use(xive->kvm, vp_id)) {
+               pr_devel("Duplicate !\n");
+               r = -EEXIST;
+               goto bail;
+       }
+
+       xc = kzalloc(sizeof(*xc), GFP_KERNEL);
+       if (!xc) {
+               r = -ENOMEM;
+               goto bail;
+       }
+
        vcpu->arch.xive_vcpu = xc;
        xc->xive = xive;
        xc->vcpu = vcpu;
        xc->server_num = cpu;
-       xc->vp_id = kvmppc_xive_vp(xive, cpu);
+       xc->vp_id = vp_id;
        xc->mfrr = 0xff;
        xc->valid = true;
 
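Beyond the new helper, the fix above is an ordering change: the vp_id is computed first, the duplicate check runs after xive->lock is taken, and only then is the per-vCPU state allocated, so two racing connects cannot both pass the check. A generic userspace sketch of that check-then-allocate-under-one-lock pattern (pthread mutex standing in for the kernel mutex, names invented) might be:

	#include <errno.h>
	#include <pthread.h>
	#include <stdlib.h>

	#define MAX_SLOTS 8

	static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
	static int *slots[MAX_SLOTS];		/* NULL = not connected */

	/* Returns 0 on success, -EEXIST for a duplicate, -ENOMEM/-EINVAL on error. */
	static int connect_id(unsigned int id)
	{
		int *entry;
		int ret = 0;

		if (id >= MAX_SLOTS)
			return -EINVAL;

		pthread_mutex_lock(&table_lock);

		/* The duplicate check and the allocation sit in the same
		 * critical section; a check done before taking the lock could
		 * race with another caller connecting the same id. */
		if (slots[id]) {
			ret = -EEXIST;
			goto out;
		}

		entry = calloc(1, sizeof(*entry));
		if (!entry) {
			ret = -ENOMEM;
			goto out;
		}
		*entry = (int)id;
		slots[id] = entry;
	out:
		pthread_mutex_unlock(&table_lock);
		return ret;
	}

	int main(void)
	{
		return connect_id(3) || connect_id(3) != -EEXIST;
	}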
index 955b820ffd6da64f7c4471db47b271bbc1e8fafe..fe3ed50e081867676ce8af7a4930cfe1ccebd9fe 100644 (file)
@@ -220,6 +220,18 @@ static inline u32 kvmppc_xive_vp(struct kvmppc_xive *xive, u32 server)
        return xive->vp_base + kvmppc_pack_vcpu_id(xive->kvm, server);
 }
 
+static inline bool kvmppc_xive_vp_in_use(struct kvm *kvm, u32 vp_id)
+{
+       struct kvm_vcpu *vcpu = NULL;
+       int i;
+
+       kvm_for_each_vcpu(i, vcpu, kvm) {
+               if (vcpu->arch.xive_vcpu && vp_id == vcpu->arch.xive_vcpu->vp_id)
+                       return true;
+       }
+       return false;
+}
+
 /*
  * Mapping between guest priorities and host priorities
  * is as follow.
index 248c1ea9e7881f2ebfc09f2149f5f7a4158ee413..78b906ffa0d2b8bc886fd2c004836d1bac122497 100644 (file)
@@ -106,6 +106,7 @@ int kvmppc_xive_native_connect_vcpu(struct kvm_device *dev,
        struct kvmppc_xive *xive = dev->private;
        struct kvmppc_xive_vcpu *xc = NULL;
        int rc;
+       u32 vp_id;
 
        pr_devel("native_connect_vcpu(server=%d)\n", server_num);
 
@@ -124,7 +125,8 @@ int kvmppc_xive_native_connect_vcpu(struct kvm_device *dev,
 
        mutex_lock(&xive->lock);
 
-       if (kvmppc_xive_find_server(vcpu->kvm, server_num)) {
+       vp_id = kvmppc_xive_vp(xive, server_num);
+       if (kvmppc_xive_vp_in_use(xive->kvm, vp_id)) {
                pr_devel("Duplicate !\n");
                rc = -EEXIST;
                goto bail;
@@ -141,7 +143,7 @@ int kvmppc_xive_native_connect_vcpu(struct kvm_device *dev,
        xc->vcpu = vcpu;
        xc->server_num = server_num;
 
-       xc->vp_id = kvmppc_xive_vp(xive, server_num);
+       xc->vp_id = vp_id;
        xc->valid = true;
        vcpu->arch.irq_type = KVMPPC_IRQ_XIVE;
 
index 1d93e55a2de1e2da86fbaa4335665d54c766a02f..2dd452a047cd6c9fa68fa80d45587e3392e60db2 100644 (file)
@@ -761,6 +761,7 @@ static int spufs_init_fs_context(struct fs_context *fc)
        ctx->gid = current_gid();
        ctx->mode = 0755;
 
+       fc->fs_private = ctx;
        fc->s_fs_info = sbi;
        fc->ops = &spufs_context_ops;
        return 0;
index 6bc24a47e9ef956e654ea022b4e6a9b52c0903df..6f300ab7f0e9b368114d65232d59d0a355d1201d 100644 (file)
@@ -42,7 +42,7 @@ void pnv_pcibios_bus_add_device(struct pci_dev *pdev)
 {
        struct pci_dn *pdn = pci_get_pdn(pdev);
 
-       if (eeh_has_flag(EEH_FORCE_DISABLED))
+       if (!pdn || eeh_has_flag(EEH_FORCE_DISABLED))
                return;
 
        dev_dbg(&pdev->dev, "EEH: Setting up device\n");
index fbd6e6b7bbf28c4ac806681e4abd42e488504af8..13e25169934665e6655c780def5d3d056775d124 100644 (file)
@@ -146,20 +146,25 @@ static int pnv_smp_cpu_disable(void)
        return 0;
 }
 
+static void pnv_flush_interrupts(void)
+{
+       if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+               if (xive_enabled())
+                       xive_flush_interrupt();
+               else
+                       icp_opal_flush_interrupt();
+       } else {
+               icp_native_flush_interrupt();
+       }
+}
+
 static void pnv_smp_cpu_kill_self(void)
 {
+       unsigned long srr1, unexpected_mask, wmask;
        unsigned int cpu;
-       unsigned long srr1, wmask;
        u64 lpcr_val;
 
        /* Standard hot unplug procedure */
-       /*
-        * This hard disables local interurpts, ensuring we have no lazy
-        * irqs pending.
-        */
-       WARN_ON(irqs_disabled());
-       hard_irq_disable();
-       WARN_ON(lazy_irq_pending());
 
        idle_task_exit();
        current->active_mm = NULL; /* for sanity */
@@ -172,6 +177,27 @@ static void pnv_smp_cpu_kill_self(void)
        if (cpu_has_feature(CPU_FTR_ARCH_207S))
                wmask = SRR1_WAKEMASK_P8;
 
+       /*
+        * This turns the irq soft-disabled state we're called with, into a
+        * hard-disabled state with pending irq_happened interrupts cleared.
+        *
+        * PACA_IRQ_DEC   - Decrementer should be ignored.
+        * PACA_IRQ_HMI   - Can be ignored, processing is done in real mode.
+        * PACA_IRQ_DBELL, EE, PMI - Unexpected.
+        */
+       hard_irq_disable();
+       if (generic_check_cpu_restart(cpu))
+               goto out;
+
+       unexpected_mask = ~(PACA_IRQ_DEC | PACA_IRQ_HMI | PACA_IRQ_HARD_DIS);
+       if (local_paca->irq_happened & unexpected_mask) {
+               if (local_paca->irq_happened & PACA_IRQ_EE)
+                       pnv_flush_interrupts();
+               DBG("CPU%d Unexpected exit while offline irq_happened=%lx!\n",
+                               cpu, local_paca->irq_happened);
+       }
+       local_paca->irq_happened = PACA_IRQ_HARD_DIS;
+
        /*
         * We don't want to take decrementer interrupts while we are
         * offline, so clear LPCR:PECE1. We keep PECE2 (and
@@ -197,6 +223,7 @@ static void pnv_smp_cpu_kill_self(void)
 
                srr1 = pnv_cpu_offline(cpu);
 
+               WARN_ON_ONCE(!irqs_disabled());
                WARN_ON(lazy_irq_pending());
 
                /*
@@ -212,13 +239,7 @@ static void pnv_smp_cpu_kill_self(void)
                 */
                if (((srr1 & wmask) == SRR1_WAKEEE) ||
                    ((srr1 & wmask) == SRR1_WAKEHVI)) {
-                       if (cpu_has_feature(CPU_FTR_ARCH_300)) {
-                               if (xive_enabled())
-                                       xive_flush_interrupt();
-                               else
-                                       icp_opal_flush_interrupt();
-                       } else
-                               icp_native_flush_interrupt();
+                       pnv_flush_interrupts();
                } else if ((srr1 & wmask) == SRR1_WAKEHDBELL) {
                        unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);
                        asm volatile(PPC_MSGCLR(%0) : : "r" (msg));
@@ -266,7 +287,7 @@ static void pnv_smp_cpu_kill_self(void)
         */
        lpcr_val = mfspr(SPRN_LPCR) | (u64)LPCR_PECE1;
        pnv_program_cpu_hotplug_lpcr(cpu, lpcr_val);
-
+out:
        DBG("CPU%d coming online...\n", cpu);
 }
 
index b53359258d9953c551f9f90f694adf80240b1763..f87a5c64e24dcf534de83255baf8d7bbd441433f 100644 (file)
@@ -1419,6 +1419,9 @@ void __init pseries_lpar_read_hblkrm_characteristics(void)
        unsigned char local_buffer[SPLPAR_TLB_BIC_MAXLENGTH];
        int call_status, len, idx, bpsize;
 
+       if (!firmware_has_feature(FW_FEATURE_BLOCK_REMOVE))
+               return;
+
        spin_lock(&rtas_data_buf_lock);
        memset(rtas_data_buf, 0, RTAS_DATA_BUF_SIZE);
        call_status = rtas_call(rtas_token("ibm,get-system-parameter"), 3, 1,
index 104d334511cd6b5d619496226bde06fb982f264a..88cfcb96bf233d3bb1c7677eb5526f6072e8ce77 100644 (file)
@@ -13,6 +13,7 @@
        compatible = "sifive,hifive-unleashed-a00", "sifive,fu540-c000";
 
        chosen {
+               stdout-path = "serial0";
        };
 
        cpus {
index 5a02b7d5094081d2d1f7e09d5a16dce23073c4f4..9c992a88d858fe6105f16978849f3a564d42b85f 100644 (file)
@@ -22,6 +22,7 @@
 
 #define REG_L          __REG_SEL(ld, lw)
 #define REG_S          __REG_SEL(sd, sw)
+#define REG_SC         __REG_SEL(sc.d, sc.w)
 #define SZREG          __REG_SEL(8, 4)
 #define LGREG          __REG_SEL(3, 2)
 
index 07ceee8b17470a0e0aa9e6b4c03e378c0134d699..75604fec1b1b53fa954602b845f4d959494e595d 100644 (file)
@@ -12,7 +12,6 @@
 
 #include <asm/asm.h>
 
-#ifdef CONFIG_GENERIC_BUG
 #define __INSN_LENGTH_MASK  _UL(0x3)
 #define __INSN_LENGTH_32    _UL(0x3)
 #define __COMPRESSED_INSN_MASK _UL(0xffff)
@@ -20,7 +19,6 @@
 #define __BUG_INSN_32  _UL(0x00100073) /* ebreak */
 #define __BUG_INSN_16  _UL(0x9002) /* c.ebreak */
 
-#ifndef __ASSEMBLY__
 typedef u32 bug_insn_t;
 
 #ifdef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
@@ -43,6 +41,7 @@ typedef u32 bug_insn_t;
        RISCV_SHORT " %2"
 #endif
 
+#ifdef CONFIG_GENERIC_BUG
 #define __BUG_FLAGS(flags)                                     \
 do {                                                           \
        __asm__ __volatile__ (                                  \
@@ -58,14 +57,10 @@ do {                                                                \
                  "i" (flags),                                  \
                  "i" (sizeof(struct bug_entry)));              \
 } while (0)
-
-#endif /* !__ASSEMBLY__ */
 #else /* CONFIG_GENERIC_BUG */
-#ifndef __ASSEMBLY__
 #define __BUG_FLAGS(flags) do {                                        \
        __asm__ __volatile__ ("ebreak\n");                      \
 } while (0)
-#endif /* !__ASSEMBLY__ */
 #endif /* CONFIG_GENERIC_BUG */
 
 #define BUG() do {                                             \
@@ -79,15 +74,10 @@ do {                                                                \
 
 #include <asm-generic/bug.h>
 
-#ifndef __ASSEMBLY__
-
 struct pt_regs;
 struct task_struct;
 
-extern void die(struct pt_regs *regs, const char *str);
-extern void do_trap(struct pt_regs *regs, int signo, int code,
-       unsigned long addr);
-
-#endif /* !__ASSEMBLY__ */
+void die(struct pt_regs *regs, const char *str);
+void do_trap(struct pt_regs *regs, int signo, int code, unsigned long addr);
 
 #endif /* _ASM_RISCV_BUG_H */
index fc1189ad377728f7f307796731b43f3b5310f8ee..3ba4d93721d302ec79e94d96dc4bd1dac940c56e 100644 (file)
@@ -13,6 +13,7 @@
 
 #include <linux/types.h>
 #include <asm/mmiowb.h>
+#include <asm/pgtable.h>
 
 extern void __iomem *ioremap(phys_addr_t offset, unsigned long size);
 
@@ -161,6 +162,12 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
 #define writeq(v,c)    ({ __io_bw(); writeq_cpu((v),(c)); __io_aw(); })
 #endif
 
+/*
+ *  I/O port access constants.
+ */
+#define IO_SPACE_LIMIT         (PCI_IO_SIZE - 1)
+#define PCI_IOBASE             ((void __iomem *)PCI_IO_START)
+
 /*
  * Emulation routines for the port-mapped IO space used by some PCI drivers.
  * These are defined as being "fully synchronous", but also "not guaranteed to
index 75576424c0f70c20d34e95419224a08837ab363b..6e1b0e0325eb92a26bba60f7c08c7c32a37a45d0 100644 (file)
@@ -7,6 +7,9 @@
 #ifndef _ASM_RISCV_IRQ_H
 #define _ASM_RISCV_IRQ_H
 
+#include <linux/interrupt.h>
+#include <linux/linkage.h>
+
 #define NR_IRQS         0
 
 void riscv_timer_interrupt(void);
index 7255f2d8395bb07fdd2ce5b4c002fa1581c7782d..d3221017194dd19bfe824683ecb700dee60d6e7b 100644 (file)
@@ -7,6 +7,7 @@
 #define _ASM_RISCV_PGTABLE_H
 
 #include <linux/mmzone.h>
+#include <linux/sizes.h>
 
 #include <asm/pgtable-bits.h>
 
@@ -86,14 +87,7 @@ extern pgd_t swapper_pg_dir[];
 #define VMALLOC_SIZE     (KERN_VIRT_SIZE >> 1)
 #define VMALLOC_END      (PAGE_OFFSET - 1)
 #define VMALLOC_START    (PAGE_OFFSET - VMALLOC_SIZE)
-
-#define FIXADDR_TOP      VMALLOC_START
-#ifdef CONFIG_64BIT
-#define FIXADDR_SIZE     PMD_SIZE
-#else
-#define FIXADDR_SIZE     PGDIR_SIZE
-#endif
-#define FIXADDR_START    (FIXADDR_TOP - FIXADDR_SIZE)
+#define PCI_IO_SIZE      SZ_16M
 
 /*
  * Roughly size the vmemmap space to be large enough to fit enough
@@ -108,6 +102,17 @@ extern pgd_t swapper_pg_dir[];
 
 #define vmemmap                ((struct page *)VMEMMAP_START)
 
+#define PCI_IO_END       VMEMMAP_START
+#define PCI_IO_START     (PCI_IO_END - PCI_IO_SIZE)
+#define FIXADDR_TOP      PCI_IO_START
+
+#ifdef CONFIG_64BIT
+#define FIXADDR_SIZE     PMD_SIZE
+#else
+#define FIXADDR_SIZE     PGDIR_SIZE
+#endif
+#define FIXADDR_START    (FIXADDR_TOP - FIXADDR_SIZE)
+
 /*
  * ZERO_PAGE is a global shared page that is always zero,
  * used for zero-mapped memory areas, etc.
@@ -184,10 +189,7 @@ static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
        return __pte((pfn << _PAGE_PFN_SHIFT) | pgprot_val(prot));
 }
 
-static inline pte_t mk_pte(struct page *page, pgprot_t prot)
-{
-       return pfn_pte(page_to_pfn(page), prot);
-}
+#define mk_pte(page, prot)       pfn_pte(page_to_pfn(page), prot)
 
 #define pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
 
@@ -428,9 +430,7 @@ static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
 #define __pte_to_swp_entry(pte)        ((swp_entry_t) { pte_val(pte) })
 #define __swp_entry_to_pte(x)  ((pte_t) { (x).val })
 
-#ifdef CONFIG_FLATMEM
 #define kern_addr_valid(addr)   (1) /* FIXME */
-#endif
 
 extern void *dtb_early_va;
 extern void setup_bootmem(void);
index f0227bdce0f0614925ae772dc80588b8828f2ec0..ee4f0ac62c9d7dce882489ae343965b3856c8cee 100644 (file)
@@ -6,6 +6,7 @@
 #ifndef _ASM_RISCV_SWITCH_TO_H
 #define _ASM_RISCV_SWITCH_TO_H
 
+#include <linux/sched/task_stack.h>
 #include <asm/processor.h>
 #include <asm/ptrace.h>
 #include <asm/csr.h>
index 37ae4e367ad203a55a56cfe47ee85202ed7f1781..f02188a5b0f41b4a193aa432014aaf68bf5cce28 100644 (file)
 #include <linux/mm_types.h>
 #include <asm/smp.h>
 
-/*
- * Flush entire local TLB.  'sfence.vma' implicitly fences with the instruction
- * cache as well, so a 'fence.i' is not necessary.
- */
 static inline void local_flush_tlb_all(void)
 {
        __asm__ __volatile__ ("sfence.vma" : : : "memory");
index b1ade9a49347796cb550696f9c111af8ea9a9ead..a5ad00043104d3c16275f271649af64755c8de0e 100644 (file)
@@ -10,6 +10,7 @@
 #include <asm/processor.h>
 #include <asm/hwcap.h>
 #include <asm/smp.h>
+#include <asm/switch_to.h>
 
 unsigned long elf_hwcap __read_mostly;
 #ifdef CONFIG_FPU
index da7aa88113c2cca00be5902dfff88173067b744b..8ca4798311429a2c9ca0c9efe8d6218637830504 100644 (file)
@@ -98,7 +98,26 @@ _save_context:
  */
        .macro RESTORE_ALL
        REG_L a0, PT_SSTATUS(sp)
-       REG_L a2, PT_SEPC(sp)
+       /*
+        * The current load reservation is effectively part of the processor's
+        * state, in the sense that load reservations cannot be shared between
+        * different hart contexts.  We can't actually save and restore a load
+        * reservation, so instead here we clear any existing reservation --
+        * it's always legal for implementations to clear load reservations at
+        * any point (as long as the forward progress guarantee is kept, but
+        * we'll ignore that here).
+        *
+        * Dangling load reservations can be the result of taking a trap in the
+        * middle of an LR/SC sequence, but can also be the result of a taken
+        * forward branch around an SC -- which is how we implement CAS.  As a
+        * result we need to clear reservations between the last CAS and the
+        * jump back to the new context.  While it is unlikely the store
+        * completes, implementations are allowed to expand reservations to be
+        * arbitrarily large.
+        */
+       REG_L  a2, PT_SEPC(sp)
+       REG_SC x0, a2, PT_SEPC(sp)
+
        csrw CSR_SSTATUS, a0
        csrw CSR_SEPC, a2
 
@@ -254,12 +273,11 @@ restore_all:
 resume_kernel:
        REG_L s0, TASK_TI_PREEMPT_COUNT(tp)
        bnez s0, restore_all
-need_resched:
        REG_L s0, TASK_TI_FLAGS(tp)
        andi s0, s0, _TIF_NEED_RESCHED
        beqz s0, restore_all
        call preempt_schedule_irq
-       j need_resched
+       j restore_all
 #endif
 
 work_pending:
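The long comment added to RESTORE_ALL is about compare-and-swap sequences built from LR/SC, where a failed comparison branches forward over the SC and can leave a reservation dangling. For reference, a host-runnable C11 sketch (not from the kernel) of the kind of CAS retry loop that RISC-V toolchains typically lower to such an LR / compare / branch-over-SC pattern could be:

	#include <stdatomic.h>
	#include <stdio.h>

	/* Add @inc to @v with an explicit compare-and-swap retry loop.  When
	 * the comparison fails, the generated SC is skipped by a forward
	 * branch, which is one way a load reservation ends up dangling. */
	static int fetch_add_cas(atomic_int *v, int inc)
	{
		int old = atomic_load_explicit(v, memory_order_relaxed);

		while (!atomic_compare_exchange_weak_explicit(v, &old, old + inc,
							      memory_order_seq_cst,
							      memory_order_relaxed))
			;	/* 'old' was refreshed with the current value; retry */

		return old;
	}

	int main(void)
	{
		atomic_int counter = 40;
		int before = fetch_add_cas(&counter, 2);

		printf("%d -> %d\n", before, atomic_load(&counter));
		return 0;
	}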
diff --git a/arch/riscv/kernel/head.h b/arch/riscv/kernel/head.h
new file mode 100644 (file)
index 0000000..105fb04
--- /dev/null
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 SiFive, Inc.
+ */
+#ifndef __ASM_HEAD_H
+#define __ASM_HEAD_H
+
+#include <linux/linkage.h>
+#include <linux/init.h>
+
+extern atomic_t hart_lottery;
+
+asmlinkage void do_page_fault(struct pt_regs *regs);
+asmlinkage void __init setup_vm(uintptr_t dtb_pa);
+
+extern void *__cpu_up_stack_pointer[];
+extern void *__cpu_up_task_pointer[];
+
+void __init parse_dtb(void);
+
+#endif /* __ASM_HEAD_H */
index 6d8659388c4922e1bb4bf5f67121e3407f570129..fffac6ddb0e00412fa76546cfdc0166eb5d4ed30 100644 (file)
@@ -24,7 +24,7 @@ int arch_show_interrupts(struct seq_file *p, int prec)
        return 0;
 }
 
-asmlinkage void __irq_entry do_IRQ(struct pt_regs *regs)
+asmlinkage __visible void __irq_entry do_IRQ(struct pt_regs *regs)
 {
        struct pt_regs *old_regs = set_irq_regs(regs);
 
index c9ae48333114ededcd0604e1a6bbf3291995e289..e264e59e596e80d71166107f7b5763f74fbf02c8 100644 (file)
@@ -8,6 +8,7 @@
 #include <linux/elf.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/moduleloader.h>
 
 unsigned long module_emit_got_entry(struct module *mod, unsigned long val)
 {
index fb3a082362eb87554ff297f3e026d9663c5a8044..85e3c39bb60bcfa4634212cb3c8a52d06953cf89 100644 (file)
@@ -7,6 +7,7 @@
  * Copyright (C) 2017 SiFive
  */
 
+#include <linux/cpu.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
 #include <linux/sched/task_stack.h>
@@ -19,6 +20,7 @@
 #include <asm/csr.h>
 #include <asm/string.h>
 #include <asm/switch_to.h>
+#include <asm/thread_info.h>
 
 extern asmlinkage void ret_from_fork(void);
 extern asmlinkage void ret_from_kernel_thread(void);
index 3687514383669028d9717c35b956b21af5bb1c0e..1252113ef8b2ca9bedd507acdaf7343e5ced13eb 100644 (file)
@@ -148,7 +148,7 @@ long arch_ptrace(struct task_struct *child, long request,
  * Allows PTRACE_SYSCALL to work.  These are called from entry.S in
  * {handle,ret_from}_syscall.
  */
-void do_syscall_trace_enter(struct pt_regs *regs)
+__visible void do_syscall_trace_enter(struct pt_regs *regs)
 {
        if (test_thread_flag(TIF_SYSCALL_TRACE))
                if (tracehook_report_syscall_entry(regs))
@@ -162,7 +162,7 @@ void do_syscall_trace_enter(struct pt_regs *regs)
        audit_syscall_entry(regs->a7, regs->a0, regs->a1, regs->a2, regs->a3);
 }
 
-void do_syscall_trace_exit(struct pt_regs *regs)
+__visible void do_syscall_trace_exit(struct pt_regs *regs)
 {
        audit_syscall_exit(regs);
 
index d0fe623bfb8f31c9a56e9f90ce25ec2b192a2a89..aa56bb135ec49f25dbe989e8b425b1a67c80a740 100644 (file)
@@ -4,6 +4,7 @@
  */
 
 #include <linux/reboot.h>
+#include <linux/pm.h>
 #include <asm/sbi.h>
 
 static void default_power_off(void)
index a990a6cb184f2e2c66a1abeafe3958020a9b2f2a..845ae0e121159719115cc818e096aa0bc621970b 100644 (file)
@@ -24,6 +24,8 @@
 #include <asm/tlbflush.h>
 #include <asm/thread_info.h>
 
+#include "head.h"
+
 #ifdef CONFIG_DUMMY_CONSOLE
 struct screen_info screen_info = {
        .orig_video_lines       = 30,
index b14d7647d80014fd1982487b726b563def29775a..d0f6f212f5dfde4e8ef044af3b7c2bca316fabcf 100644 (file)
@@ -26,7 +26,7 @@ struct rt_sigframe {
 
 #ifdef CONFIG_FPU
 static long restore_fp_state(struct pt_regs *regs,
-                            union __riscv_fp_state *sc_fpregs)
+                            union __riscv_fp_state __user *sc_fpregs)
 {
        long err;
        struct __riscv_d_ext_state __user *state = &sc_fpregs->d;
@@ -53,7 +53,7 @@ static long restore_fp_state(struct pt_regs *regs,
 }
 
 static long save_fp_state(struct pt_regs *regs,
-                         union __riscv_fp_state *sc_fpregs)
+                         union __riscv_fp_state __user *sc_fpregs)
 {
        long err;
        struct __riscv_d_ext_state __user *state = &sc_fpregs->d;
@@ -292,8 +292,8 @@ static void do_signal(struct pt_regs *regs)
  * notification of userspace execution resumption
  * - triggered by the _TIF_WORK_MASK flags
  */
-asmlinkage void do_notify_resume(struct pt_regs *regs,
-       unsigned long thread_info_flags)
+asmlinkage __visible void do_notify_resume(struct pt_regs *regs,
+                                          unsigned long thread_info_flags)
 {
        /* Handle pending signal delivery */
        if (thread_info_flags & _TIF_SIGPENDING)
index b18cd6c8e8fbbf8383b0149da14be73b3c89f328..5c9ec78422c229490556295c7bbdb65def2a2fae 100644 (file)
@@ -8,7 +8,9 @@
  * Copyright (C) 2017 SiFive
  */
 
+#include <linux/cpu.h>
 #include <linux/interrupt.h>
+#include <linux/profile.h>
 #include <linux/smp.h>
 #include <linux/sched.h>
 #include <linux/seq_file.h>
index 18ae6da5115e53d4612f84dc8a0898b863e8b55e..261f4087cc39e177fbc063b003dfafc57488cc18 100644 (file)
@@ -29,6 +29,9 @@
 #include <asm/tlbflush.h>
 #include <asm/sections.h>
 #include <asm/sbi.h>
+#include <asm/smp.h>
+
+#include "head.h"
 
 void *__cpu_up_stack_pointer[NR_CPUS];
 void *__cpu_up_task_pointer[NR_CPUS];
@@ -130,7 +133,7 @@ void __init smp_cpus_done(unsigned int max_cpus)
 /*
  * C entry point for a secondary processor.
  */
-asmlinkage void __init smp_callin(void)
+asmlinkage __visible void __init smp_callin(void)
 {
        struct mm_struct *mm = &init_mm;
 
index e5dd52d8f6332b4566787243b7a90313939f97d0..f1ead9df96ca55b11fc7319cfc7b852a7d019676 100644 (file)
@@ -8,6 +8,7 @@
 #include <linux/syscalls.h>
 #include <asm-generic/syscalls.h>
 #include <asm/vdso.h>
+#include <asm/syscall.h>
 
 #undef __SYSCALL
 #define __SYSCALL(nr, call)    [nr] = (call),
index 9dd1f2e64db12089aac4cdd8d65ec9d543950a08..6a53c02e9c734c6db04e6236580ace56f0d27b19 100644 (file)
@@ -7,6 +7,7 @@
 #include <linux/clocksource.h>
 #include <linux/delay.h>
 #include <asm/sbi.h>
+#include <asm/processor.h>
 
 unsigned long riscv_timebase;
 EXPORT_SYMBOL_GPL(riscv_timebase);
index 424eb72d56b10c51120cf02a6618a504f37d41f9..473de3ae8bb75766ded32ec9ae35b1d4549e156b 100644 (file)
@@ -3,6 +3,7 @@
  * Copyright (C) 2012 Regents of the University of California
  */
 
+#include <linux/cpu.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/sched.h>
@@ -83,7 +84,7 @@ static void do_trap_error(struct pt_regs *regs, int signo, int code,
 }
 
 #define DO_ERROR_INFO(name, signo, code, str)                          \
-asmlinkage void name(struct pt_regs *regs)                             \
+asmlinkage __visible void name(struct pt_regs *regs)                   \
 {                                                                      \
        do_trap_error(regs, signo, code, regs->sepc, "Oops - " str);    \
 }
@@ -111,7 +112,6 @@ DO_ERROR_INFO(do_trap_ecall_s,
 DO_ERROR_INFO(do_trap_ecall_m,
        SIGILL, ILL_ILLTRP, "environment call from M-mode");
 
-#ifdef CONFIG_GENERIC_BUG
 static inline unsigned long get_break_insn_length(unsigned long pc)
 {
        bug_insn_t insn;
@@ -120,28 +120,15 @@ static inline unsigned long get_break_insn_length(unsigned long pc)
                return 0;
        return (((insn & __INSN_LENGTH_MASK) == __INSN_LENGTH_32) ? 4UL : 2UL);
 }
-#endif /* CONFIG_GENERIC_BUG */
 
-asmlinkage void do_trap_break(struct pt_regs *regs)
+asmlinkage __visible void do_trap_break(struct pt_regs *regs)
 {
-#ifdef CONFIG_GENERIC_BUG
-       if (!user_mode(regs)) {
-               enum bug_trap_type type;
-
-               type = report_bug(regs->sepc, regs);
-               switch (type) {
-               case BUG_TRAP_TYPE_NONE:
-                       break;
-               case BUG_TRAP_TYPE_WARN:
-                       regs->sepc += get_break_insn_length(regs->sepc);
-                       break;
-               case BUG_TRAP_TYPE_BUG:
-                       die(regs, "Kernel BUG");
-               }
-       }
-#endif /* CONFIG_GENERIC_BUG */
-
-       force_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)(regs->sepc));
+       if (user_mode(regs))
+               force_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)regs->sepc);
+       else if (report_bug(regs->sepc, regs) == BUG_TRAP_TYPE_WARN)
+               regs->sepc += get_break_insn_length(regs->sepc);
+       else
+               die(regs, "Kernel BUG");
 }
 
 #ifdef CONFIG_GENERIC_BUG
index c9c21e0d56417cf3ebb3ccab116f44710da1b31f..484d95a70907afba706aec216ffc16450fb39e9d 100644 (file)
@@ -6,6 +6,7 @@
  * Copyright (C) 2015 Regents of the University of California
  */
 
+#include <linux/elf.h>
 #include <linux/mm.h>
 #include <linux/slab.h>
 #include <linux/binfmts.h>
@@ -25,7 +26,7 @@ static union {
        struct vdso_data        data;
        u8                      page[PAGE_SIZE];
 } vdso_data_store __page_aligned_data;
-struct vdso_data *vdso_data = &vdso_data_store.data;
+static struct vdso_data *vdso_data = &vdso_data_store.data;
 
 static int __init vdso_init(void)
 {
index beeb5d7f92ea5aaccefc10e41ea036f5554da81a..ca66d44156b628d087fe1b2fc75c35f27f14e38f 100644 (file)
@@ -7,6 +7,7 @@
 #include <linux/mm.h>
 #include <asm/tlbflush.h>
 #include <asm/cacheflush.h>
+#include <asm/mmu_context.h>
 
 /*
  * When necessary, performs a deferred icache flush for the given MM context,
index 96add1427a75a5790acd3add530f33422c0808d9..247b8c859c448ae16467b3c8e8b620adfaa94dfd 100644 (file)
@@ -18,6 +18,8 @@
 #include <asm/ptrace.h>
 #include <asm/tlbflush.h>
 
+#include "../kernel/head.h"
+
 /*
  * This routine handles page faults.  It determines the address and the
  * problem, and then passes it off to one of the appropriate routines.
index f0ba71304b6e52b8622ac87e690673593ee8bb6d..573463d1c799a0425e550681507643a9e60fe1cb 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/swap.h>
 #include <linux/sizes.h>
 #include <linux/of_fdt.h>
+#include <linux/libfdt.h>
 
 #include <asm/fixmap.h>
 #include <asm/tlbflush.h>
@@ -18,6 +19,8 @@
 #include <asm/pgtable.h>
 #include <asm/io.h>
 
+#include "../kernel/head.h"
+
 unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
                                                        __page_aligned_bss;
 EXPORT_SYMBOL(empty_zero_page);
@@ -82,6 +85,8 @@ disable:
 }
 #endif /* CONFIG_BLK_DEV_INITRD */
 
+static phys_addr_t dtb_early_pa __initdata;
+
 void __init setup_bootmem(void)
 {
        struct memblock_region *reg;
@@ -117,7 +122,12 @@ void __init setup_bootmem(void)
        setup_initrd();
 #endif /* CONFIG_BLK_DEV_INITRD */
 
-       early_init_fdt_reserve_self();
+       /*
+        * Avoid using early_init_fdt_reserve_self() since __pa() does
+        * not work for DTB pointers that are fixmap addresses
+        */
+       memblock_reserve(dtb_early_pa, fdt_totalsize(dtb_early_va));
+
        early_init_fdt_scan_reserved_mem();
        memblock_allow_resize();
        memblock_dump_all();
@@ -329,8 +339,7 @@ static uintptr_t __init best_map_size(phys_addr_t base, phys_addr_t size)
  */
 
 #ifndef __riscv_cmodel_medany
-#error "setup_vm() is called from head.S before relocate so it should "
-       "not use absolute addressing."
+#error "setup_vm() is called from head.S before relocate so it should not use absolute addressing."
 #endif
 
 asmlinkage void __init setup_vm(uintptr_t dtb_pa)
@@ -393,6 +402,8 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
 
        /* Save pointer to DTB for early FDT parsing */
        dtb_early_va = (void *)fix_to_virt(FIX_FDT) + (dtb_pa & ~PAGE_MASK);
+       /* Save physical address for memblock reservation */
+       dtb_early_pa = dtb_pa;
 }
 
 static void __init setup_vm_final(void)
@@ -448,7 +459,7 @@ void __init paging_init(void)
        zone_sizes_init();
 }
 
-#ifdef CONFIG_SPARSEMEM
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
 int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
                               struct vmem_altmap *altmap)
 {
index 2e637ad71c05f099b5e4e45f1abb39bac5991d64..a9ffff3277c77fa7b1847e45b890689aed909d7c 100644 (file)
@@ -142,7 +142,7 @@ static irqreturn_t l2_int_handler(int irq, void *device)
        return IRQ_HANDLED;
 }
 
-int __init sifive_l2_init(void)
+static int __init sifive_l2_init(void)
 {
        struct device_node *np;
        struct resource res;
index 596ca7cc4d7b88a7c37b93cc58d87651ffe4bd9d..5367950510f695ae4c970dfa511f78456259a933 100644 (file)
@@ -101,10 +101,18 @@ static void handle_relocs(unsigned long offset)
        dynsym = (Elf64_Sym *) vmlinux.dynsym_start;
        for (rela = rela_start; rela < rela_end; rela++) {
                loc = rela->r_offset + offset;
-               val = rela->r_addend + offset;
+               val = rela->r_addend;
                r_sym = ELF64_R_SYM(rela->r_info);
-               if (r_sym)
-                       val += dynsym[r_sym].st_value;
+               if (r_sym) {
+                       if (dynsym[r_sym].st_shndx != SHN_UNDEF)
+                               val += dynsym[r_sym].st_value + offset;
+               } else {
+                       /*
+                        * 0 == undefined symbol table index (STN_UNDEF),
+                        * used for R_390_RELATIVE, only add KASLR offset
+                        */
+                       val += offset;
+               }
                r_type = ELF64_R_TYPE(rela->r_info);
                rc = arch_kexec_do_relocs(r_type, (void *) loc, val, 0);
                if (rc)
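The corrected logic above distinguishes two cases: a relocation against a defined dynamic symbol gets st_value plus the KASLR offset added to its addend, while the STN_UNDEF case used by R_390_RELATIVE only gets the offset. A small host-side sketch of that selection (simplified record types, not the real Elf64_Rela/Elf64_Sym handling) might be:

	#include <stdint.h>
	#include <stdio.h>

	struct sym  { uint64_t st_value; int defined; };
	struct rela { uint64_t r_offset; int64_t r_addend; unsigned int r_sym; };

	/* Value to patch in at r_offset + kaslr_offset. */
	static uint64_t reloc_value(const struct rela *r, const struct sym *dynsym,
				    uint64_t kaslr_offset)
	{
		uint64_t val = (uint64_t)r->r_addend;

		if (r->r_sym) {
			/* Symbol-based relocation: relocate only if defined. */
			if (dynsym[r->r_sym].defined)
				val += dynsym[r->r_sym].st_value + kaslr_offset;
		} else {
			/* r_sym == 0 (STN_UNDEF), e.g. R_390_RELATIVE: the
			 * addend is a link-time address that only needs the
			 * KASLR shift. */
			val += kaslr_offset;
		}
		return val;
	}

	int main(void)
	{
		struct sym  dynsym[] = { { 0, 0 }, { 0x1000, 1 } };
		struct rela against_sym = { 0x200, 8, 1 };	/* defined symbol #1 */
		struct rela relative    = { 0x208, 0x2000, 0 };	/* relative reloc */

		printf("0x%llx 0x%llx\n",
		       (unsigned long long)reloc_value(&against_sym, dynsym, 0x100000),
		       (unsigned long long)reloc_value(&relative, dynsym, 0x100000));
		return 0;
	}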
index 347f48702edb17154779c38db7fcb3c38be8a540..38d64030aacf69dc6cd2c0b62b13c67d79a3d4ea 100644 (file)
@@ -44,6 +44,7 @@ CONFIG_NR_CPUS=512
 CONFIG_NUMA=y
 CONFIG_HZ_100=y
 CONFIG_KEXEC_FILE=y
+CONFIG_KEXEC_SIG=y
 CONFIG_EXPOLINE=y
 CONFIG_EXPOLINE_AUTO=y
 CONFIG_CHSC_SCH=y
@@ -69,12 +70,13 @@ CONFIG_MODULE_UNLOAD=y
 CONFIG_MODULE_FORCE_UNLOAD=y
 CONFIG_MODVERSIONS=y
 CONFIG_MODULE_SRCVERSION_ALL=y
-CONFIG_MODULE_SIG=y
 CONFIG_MODULE_SIG_SHA256=y
+CONFIG_UNUSED_SYMBOLS=y
 CONFIG_BLK_DEV_INTEGRITY=y
 CONFIG_BLK_DEV_THROTTLING=y
 CONFIG_BLK_WBT=y
 CONFIG_BLK_CGROUP_IOLATENCY=y
+CONFIG_BLK_CGROUP_IOCOST=y
 CONFIG_PARTITION_ADVANCED=y
 CONFIG_IBM_PARTITION=y
 CONFIG_BSD_DISKLABEL=y
@@ -370,6 +372,7 @@ CONFIG_NETLINK_DIAG=m
 CONFIG_CGROUP_NET_PRIO=y
 CONFIG_BPF_JIT=y
 CONFIG_NET_PKTGEN=m
+# CONFIG_NET_DROP_MONITOR is not set
 CONFIG_PCI=y
 CONFIG_PCI_DEBUG=y
 CONFIG_HOTPLUG_PCI=y
@@ -424,6 +427,7 @@ CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
 CONFIG_DM_THIN_PROVISIONING=m
 CONFIG_DM_WRITECACHE=m
+CONFIG_DM_CLONE=m
 CONFIG_DM_MIRROR=m
 CONFIG_DM_LOG_USERSPACE=m
 CONFIG_DM_RAID=m
@@ -435,6 +439,7 @@ CONFIG_DM_DELAY=m
 CONFIG_DM_UEVENT=y
 CONFIG_DM_FLAKEY=m
 CONFIG_DM_VERITY=m
+CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG=y
 CONFIG_DM_SWITCH=m
 CONFIG_NETDEVICES=y
 CONFIG_BONDING=m
@@ -489,6 +494,7 @@ CONFIG_MLX5_CORE_EN=y
 # CONFIG_NET_VENDOR_NVIDIA is not set
 # CONFIG_NET_VENDOR_OKI is not set
 # CONFIG_NET_VENDOR_PACKET_ENGINES is not set
+# CONFIG_NET_VENDOR_PENSANDO is not set
 # CONFIG_NET_VENDOR_QLOGIC is not set
 # CONFIG_NET_VENDOR_QUALCOMM is not set
 # CONFIG_NET_VENDOR_RDC is not set
@@ -538,15 +544,16 @@ CONFIG_WATCHDOG=y
 CONFIG_WATCHDOG_NOWAYOUT=y
 CONFIG_SOFT_WATCHDOG=m
 CONFIG_DIAG288_WATCHDOG=m
-CONFIG_DRM=y
-CONFIG_DRM_VIRTIO_GPU=y
+CONFIG_FB=y
 CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
 # CONFIG_HID is not set
 # CONFIG_USB_SUPPORT is not set
 CONFIG_INFINIBAND=m
 CONFIG_INFINIBAND_USER_ACCESS=m
 CONFIG_MLX4_INFINIBAND=m
 CONFIG_MLX5_INFINIBAND=m
+CONFIG_SYNC_FILE=y
 CONFIG_VFIO=m
 CONFIG_VFIO_PCI=m
 CONFIG_VFIO_MDEV=m
@@ -580,6 +587,8 @@ CONFIG_NILFS2_FS=m
 CONFIG_FS_DAX=y
 CONFIG_EXPORTFS_BLOCK_OPS=y
 CONFIG_FS_ENCRYPTION=y
+CONFIG_FS_VERITY=y
+CONFIG_FS_VERITY_BUILTIN_SIGNATURES=y
 CONFIG_FANOTIFY=y
 CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
@@ -589,6 +598,7 @@ CONFIG_QFMT_V2=m
 CONFIG_AUTOFS4_FS=m
 CONFIG_FUSE_FS=y
 CONFIG_CUSE=m
+CONFIG_VIRTIO_FS=m
 CONFIG_OVERLAY_FS=m
 CONFIG_FSCACHE=m
 CONFIG_CACHEFILES=m
@@ -648,12 +658,15 @@ CONFIG_FORTIFY_SOURCE=y
 CONFIG_SECURITY_SELINUX=y
 CONFIG_SECURITY_SELINUX_BOOTPARAM=y
 CONFIG_SECURITY_SELINUX_DISABLE=y
+CONFIG_SECURITY_LOCKDOWN_LSM=y
+CONFIG_SECURITY_LOCKDOWN_LSM_EARLY=y
 CONFIG_INTEGRITY_SIGNATURE=y
 CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y
 CONFIG_IMA=y
 CONFIG_IMA_DEFAULT_HASH_SHA256=y
 CONFIG_IMA_WRITE_POLICY=y
 CONFIG_IMA_APPRAISE=y
+CONFIG_LSM="yama,loadpin,safesetid,integrity,selinux,smack,tomoyo,apparmor"
 CONFIG_CRYPTO_USER=m
 # CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
 CONFIG_CRYPTO_PCRYPT=m
@@ -664,10 +677,6 @@ CONFIG_CRYPTO_ECDH=m
 CONFIG_CRYPTO_ECRDSA=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_AEGIS128=m
-CONFIG_CRYPTO_AEGIS128L=m
-CONFIG_CRYPTO_AEGIS256=m
-CONFIG_CRYPTO_MORUS640=m
-CONFIG_CRYPTO_MORUS1280=m
 CONFIG_CRYPTO_CFB=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
@@ -739,7 +748,6 @@ CONFIG_DEBUG_INFO=y
 CONFIG_DEBUG_INFO_DWARF4=y
 CONFIG_GDB_SCRIPTS=y
 CONFIG_FRAME_WARN=1024
-CONFIG_UNUSED_SYMBOLS=y
 CONFIG_HEADERS_INSTALL=y
 CONFIG_HEADERS_CHECK=y
 CONFIG_DEBUG_SECTION_MISMATCH=y
index 8514b8b9500f30ae2479684130bc3d5e375b2290..25f79984958219506566216ca4a34db0e49a31e4 100644 (file)
@@ -44,6 +44,7 @@ CONFIG_NUMA=y
 # CONFIG_NUMA_EMU is not set
 CONFIG_HZ_100=y
 CONFIG_KEXEC_FILE=y
+CONFIG_KEXEC_SIG=y
 CONFIG_EXPOLINE=y
 CONFIG_EXPOLINE_AUTO=y
 CONFIG_CHSC_SCH=y
@@ -66,11 +67,12 @@ CONFIG_MODULE_UNLOAD=y
 CONFIG_MODULE_FORCE_UNLOAD=y
 CONFIG_MODVERSIONS=y
 CONFIG_MODULE_SRCVERSION_ALL=y
-CONFIG_MODULE_SIG=y
 CONFIG_MODULE_SIG_SHA256=y
+CONFIG_UNUSED_SYMBOLS=y
 CONFIG_BLK_DEV_THROTTLING=y
 CONFIG_BLK_WBT=y
 CONFIG_BLK_CGROUP_IOLATENCY=y
+CONFIG_BLK_CGROUP_IOCOST=y
 CONFIG_PARTITION_ADVANCED=y
 CONFIG_IBM_PARTITION=y
 CONFIG_BSD_DISKLABEL=y
@@ -363,6 +365,7 @@ CONFIG_NETLINK_DIAG=m
 CONFIG_CGROUP_NET_PRIO=y
 CONFIG_BPF_JIT=y
 CONFIG_NET_PKTGEN=m
+# CONFIG_NET_DROP_MONITOR is not set
 CONFIG_PCI=y
 CONFIG_HOTPLUG_PCI=y
 CONFIG_HOTPLUG_PCI_S390=y
@@ -418,6 +421,7 @@ CONFIG_DM_CRYPT=m
 CONFIG_DM_SNAPSHOT=m
 CONFIG_DM_THIN_PROVISIONING=m
 CONFIG_DM_WRITECACHE=m
+CONFIG_DM_CLONE=m
 CONFIG_DM_MIRROR=m
 CONFIG_DM_LOG_USERSPACE=m
 CONFIG_DM_RAID=m
@@ -429,6 +433,7 @@ CONFIG_DM_DELAY=m
 CONFIG_DM_UEVENT=y
 CONFIG_DM_FLAKEY=m
 CONFIG_DM_VERITY=m
+CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG=y
 CONFIG_DM_SWITCH=m
 CONFIG_DM_INTEGRITY=m
 CONFIG_NETDEVICES=y
@@ -484,6 +489,7 @@ CONFIG_MLX5_CORE_EN=y
 # CONFIG_NET_VENDOR_NVIDIA is not set
 # CONFIG_NET_VENDOR_OKI is not set
 # CONFIG_NET_VENDOR_PACKET_ENGINES is not set
+# CONFIG_NET_VENDOR_PENSANDO is not set
 # CONFIG_NET_VENDOR_QLOGIC is not set
 # CONFIG_NET_VENDOR_QUALCOMM is not set
 # CONFIG_NET_VENDOR_RDC is not set
@@ -533,16 +539,16 @@ CONFIG_WATCHDOG_CORE=y
 CONFIG_WATCHDOG_NOWAYOUT=y
 CONFIG_SOFT_WATCHDOG=m
 CONFIG_DIAG288_WATCHDOG=m
-CONFIG_DRM=y
-CONFIG_DRM_VIRTIO_GPU=y
-# CONFIG_BACKLIGHT_CLASS_DEVICE is not set
+CONFIG_FB=y
 CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
 # CONFIG_HID is not set
 # CONFIG_USB_SUPPORT is not set
 CONFIG_INFINIBAND=m
 CONFIG_INFINIBAND_USER_ACCESS=m
 CONFIG_MLX4_INFINIBAND=m
 CONFIG_MLX5_INFINIBAND=m
+CONFIG_SYNC_FILE=y
 CONFIG_VFIO=m
 CONFIG_VFIO_PCI=m
 CONFIG_VFIO_MDEV=m
@@ -573,6 +579,8 @@ CONFIG_NILFS2_FS=m
 CONFIG_FS_DAX=y
 CONFIG_EXPORTFS_BLOCK_OPS=y
 CONFIG_FS_ENCRYPTION=y
+CONFIG_FS_VERITY=y
+CONFIG_FS_VERITY_BUILTIN_SIGNATURES=y
 CONFIG_FANOTIFY=y
 CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
@@ -581,6 +589,7 @@ CONFIG_QFMT_V2=m
 CONFIG_AUTOFS4_FS=m
 CONFIG_FUSE_FS=y
 CONFIG_CUSE=m
+CONFIG_VIRTIO_FS=m
 CONFIG_OVERLAY_FS=m
 CONFIG_FSCACHE=m
 CONFIG_CACHEFILES=m
@@ -639,12 +648,15 @@ CONFIG_SECURITY_NETWORK=y
 CONFIG_SECURITY_SELINUX=y
 CONFIG_SECURITY_SELINUX_BOOTPARAM=y
 CONFIG_SECURITY_SELINUX_DISABLE=y
+CONFIG_SECURITY_LOCKDOWN_LSM=y
+CONFIG_SECURITY_LOCKDOWN_LSM_EARLY=y
 CONFIG_INTEGRITY_SIGNATURE=y
 CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y
 CONFIG_IMA=y
 CONFIG_IMA_DEFAULT_HASH_SHA256=y
 CONFIG_IMA_WRITE_POLICY=y
 CONFIG_IMA_APPRAISE=y
+CONFIG_LSM="yama,loadpin,safesetid,integrity,selinux,smack,tomoyo,apparmor"
 CONFIG_CRYPTO_FIPS=y
 CONFIG_CRYPTO_USER=m
 # CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
@@ -656,10 +668,6 @@ CONFIG_CRYPTO_ECDH=m
 CONFIG_CRYPTO_ECRDSA=m
 CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_AEGIS128=m
-CONFIG_CRYPTO_AEGIS128L=m
-CONFIG_CRYPTO_AEGIS256=m
-CONFIG_CRYPTO_MORUS640=m
-CONFIG_CRYPTO_MORUS1280=m
 CONFIG_CRYPTO_CFB=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_OFB=m
@@ -727,7 +735,6 @@ CONFIG_DEBUG_INFO=y
 CONFIG_DEBUG_INFO_DWARF4=y
 CONFIG_GDB_SCRIPTS=y
 CONFIG_FRAME_WARN=1024
-CONFIG_UNUSED_SYMBOLS=y
 CONFIG_DEBUG_SECTION_MISMATCH=y
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_MEMORY_INIT=y
index be09a208b608437af55f2c3f76d4b444b73ac431..20c51e5d93530ac536beeead8d2ded1bde02518a 100644 (file)
@@ -61,7 +61,7 @@ CONFIG_RAW_DRIVER=y
 CONFIG_CONFIGFS_FS=y
 # CONFIG_MISC_FILESYSTEMS is not set
 # CONFIG_NETWORK_FILESYSTEMS is not set
-# CONFIG_DIMLIB is not set
+CONFIG_LSM="yama,loadpin,safesetid,integrity"
 CONFIG_PRINTK_TIME=y
 CONFIG_DEBUG_INFO=y
 CONFIG_DEBUG_FS=y
index d3f09526ee19fadab79306c30038f26475a3e9a7..61467b9eecc72513ba28184f27c87595796c0ccc 100644 (file)
@@ -41,7 +41,7 @@ __ATOMIC_OPS(__atomic64_xor, long, "laxg")
 #undef __ATOMIC_OP
 
 #define __ATOMIC_CONST_OP(op_name, op_type, op_string, op_barrier)     \
-static inline void op_name(op_type val, op_type *ptr)                  \
+static __always_inline void op_name(op_type val, op_type *ptr)         \
 {                                                                      \
        asm volatile(                                                   \
                op_string "     %[ptr],%[val]\n"                        \
index b8833ac983fac2e94d699069c2394dd3261133d6..eb7eed43e7808f8920548450d00ee4dcaedd38f3 100644 (file)
@@ -56,7 +56,7 @@ __bitops_byte(unsigned long nr, volatile unsigned long *ptr)
        return ((unsigned char *)ptr) + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
 }
 
-static inline void arch_set_bit(unsigned long nr, volatile unsigned long *ptr)
+static __always_inline void arch_set_bit(unsigned long nr, volatile unsigned long *ptr)
 {
        unsigned long *addr = __bitops_word(nr, ptr);
        unsigned long mask;
@@ -77,7 +77,7 @@ static inline void arch_set_bit(unsigned long nr, volatile unsigned long *ptr)
        __atomic64_or(mask, (long *)addr);
 }
 
-static inline void arch_clear_bit(unsigned long nr, volatile unsigned long *ptr)
+static __always_inline void arch_clear_bit(unsigned long nr, volatile unsigned long *ptr)
 {
        unsigned long *addr = __bitops_word(nr, ptr);
        unsigned long mask;
@@ -98,8 +98,8 @@ static inline void arch_clear_bit(unsigned long nr, volatile unsigned long *ptr)
        __atomic64_and(mask, (long *)addr);
 }
 
-static inline void arch_change_bit(unsigned long nr,
-                                  volatile unsigned long *ptr)
+static __always_inline void arch_change_bit(unsigned long nr,
+                                           volatile unsigned long *ptr)
 {
        unsigned long *addr = __bitops_word(nr, ptr);
        unsigned long mask;
index a092f63aac6a2e9ec20d7a3e8a9a881d342975b0..c0f3bfeddcbeb5762134063b910d66b115be7b49 100644 (file)
@@ -171,7 +171,7 @@ typedef struct { unsigned char bytes[16]; } cpacf_mask_t;
  *
  * Returns 1 if @func is available for @opcode, 0 otherwise
  */
-static inline void __cpacf_query(unsigned int opcode, cpacf_mask_t *mask)
+static __always_inline void __cpacf_query(unsigned int opcode, cpacf_mask_t *mask)
 {
        register unsigned long r0 asm("0") = 0; /* query function */
        register unsigned long r1 asm("1") = (unsigned long) mask;
index ceeb552d34729497418238bb8f27b0193dede941..819803a97c2b2a9032352879fe4ec34880ea4f6b 100644 (file)
@@ -28,6 +28,8 @@ asm(".include \"asm/cpu_mf-insn.h\"\n");
                                 CPU_MF_INT_SF_PRA|CPU_MF_INT_SF_SACA|  \
                                 CPU_MF_INT_SF_LSDA)
 
+#define CPU_MF_SF_RIBM_NOTAV   0x1             /* Sampling unavailable */
+
 /* CPU measurement facility support */
 static inline int cpum_cf_avail(void)
 {
@@ -69,7 +71,8 @@ struct hws_qsi_info_block {       /* Bit(s) */
        unsigned long max_sampl_rate; /* 16-23: maximum sampling interval*/
        unsigned long tear;         /* 24-31: TEAR contents              */
        unsigned long dear;         /* 32-39: DEAR contents              */
-       unsigned int rsvrd0;        /* 40-43: reserved                   */
+       unsigned int rsvrd0:24;     /* 40-42: reserved                   */
+       unsigned int ribm:8;        /* 43: Reserved by IBM               */
        unsigned int cpu_speed;     /* 44-47: CPU speed                  */
        unsigned long long rsvrd1;  /* 48-55: reserved                   */
        unsigned long long rsvrd2;  /* 56-63: reserved                   */
@@ -220,7 +223,8 @@ enum stcctm_ctr_set {
        MT_DIAG = 5,
        MT_DIAG_CLEARING = 9,   /* clears loss-of-MT-ctr-data alert */
 };
-static inline int stcctm(enum stcctm_ctr_set set, u64 range, u64 *dest)
+
+static __always_inline int stcctm(enum stcctm_ctr_set set, u64 range, u64 *dest)
 {
        int cc;
 
index bb59dd9645909207351edc8d3577985f1aead9fb..de8f0bf5f238c4fba2cb32ea8985eabbb4aa8f5e 100644 (file)
@@ -12,8 +12,6 @@
 #include <asm/page.h>
 #include <asm/pgtable.h>
 
-
-#define is_hugepage_only_range(mm, addr, len)  0
 #define hugetlb_free_pgd_range                 free_pgd_range
 #define hugepages_supported()                  (MACHINE_HAS_EDAT1)
 
@@ -23,6 +21,13 @@ pte_t huge_ptep_get(pte_t *ptep);
 pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
                              unsigned long addr, pte_t *ptep);
 
+static inline bool is_hugepage_only_range(struct mm_struct *mm,
+                                         unsigned long addr,
+                                         unsigned long len)
+{
+       return false;
+}
+
 /*
  * If the arch doesn't supply something else, assume that hugepage
  * size aligned regions are ok without further preparation.
index e548ec1ec12c5ea5e61cc4708388c55badbd3e39..39f747d63758e5333ef9d8d4885b5b84eaaa1510 100644 (file)
@@ -20,7 +20,7 @@
  * We use a brcl 0,2 instruction for jump labels at compile time so it
  * can be easily distinguished from a hotpatch generated instruction.
  */
-static inline bool arch_static_branch(struct static_key *key, bool branch)
+static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
 {
        asm_volatile_goto("0:   brcl    0,"__stringify(JUMP_LABEL_NOP_OFFSET)"\n"
                          ".pushsection __jump_table,\"aw\"\n"
@@ -34,7 +34,7 @@ label:
        return true;
 }
 
-static inline bool arch_static_branch_jump(struct static_key *key, bool branch)
+static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
 {
        asm_volatile_goto("0:   brcl 15,%l[label]\n"
                          ".pushsection __jump_table,\"aw\"\n"
index 36c578c0ff969fe99209cfe7fa826dd0f495ce13..5ff98d76a66cd20851ecd6b8fe5b5a84d53a4a2e 100644 (file)
@@ -997,9 +997,9 @@ static inline pte_t pte_mkhuge(pte_t pte)
 #define IPTE_NODAT     0x400
 #define IPTE_GUEST_ASCE        0x800
 
-static inline void __ptep_ipte(unsigned long address, pte_t *ptep,
-                              unsigned long opt, unsigned long asce,
-                              int local)
+static __always_inline void __ptep_ipte(unsigned long address, pte_t *ptep,
+                                       unsigned long opt, unsigned long asce,
+                                       int local)
 {
        unsigned long pto = (unsigned long) ptep;
 
@@ -1020,8 +1020,8 @@ static inline void __ptep_ipte(unsigned long address, pte_t *ptep,
                : [r1] "a" (pto), [m4] "i" (local) : "memory");
 }
 
-static inline void __ptep_ipte_range(unsigned long address, int nr,
-                                    pte_t *ptep, int local)
+static __always_inline void __ptep_ipte_range(unsigned long address, int nr,
+                                             pte_t *ptep, int local)
 {
        unsigned long pto = (unsigned long) ptep;
 
@@ -1269,7 +1269,8 @@ static inline pte_t *pte_offset(pmd_t *pmd, unsigned long address)
 
 #define pte_offset_kernel(pmd, address) pte_offset(pmd, address)
 #define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
-#define pte_unmap(pte) do { } while (0)
+
+static inline void pte_unmap(pte_t *pte) { }
 
 static inline bool gup_fast_permitted(unsigned long start, unsigned long end)
 {
@@ -1435,9 +1436,9 @@ static inline void __pmdp_csp(pmd_t *pmdp)
 #define IDTE_NODAT     0x1000
 #define IDTE_GUEST_ASCE        0x2000
 
-static inline void __pmdp_idte(unsigned long addr, pmd_t *pmdp,
-                              unsigned long opt, unsigned long asce,
-                              int local)
+static __always_inline void __pmdp_idte(unsigned long addr, pmd_t *pmdp,
+                                       unsigned long opt, unsigned long asce,
+                                       int local)
 {
        unsigned long sto;
 
@@ -1461,9 +1462,9 @@ static inline void __pmdp_idte(unsigned long addr, pmd_t *pmdp,
        }
 }
 
-static inline void __pudp_idte(unsigned long addr, pud_t *pudp,
-                              unsigned long opt, unsigned long asce,
-                              int local)
+static __always_inline void __pudp_idte(unsigned long addr, pud_t *pudp,
+                                       unsigned long opt, unsigned long asce,
+                                       int local)
 {
        unsigned long r3o;
 
index 78e8a888306d68d7214090c44fbf1df2aa374909..e3f238e8c611660be53b9603e1ced4152c669499 100644 (file)
@@ -111,7 +111,7 @@ struct qib {
        /* private: */
        u8 res[88];
        /* public: */
-       u8 parm[QDIO_MAX_BUFFERS_PER_Q];
+       u8 parm[128];
 } __attribute__ ((packed, aligned(256)));
 
 /**
index bd2fd9a7821da55773883e6ede43f42e1518ee3c..a470f1fa9f2afb3c895d73720c66d18c00aefa83 100644 (file)
@@ -83,7 +83,7 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n);
        __rc;                                                   \
 })
 
-static inline int __put_user_fn(void *x, void __user *ptr, unsigned long size)
+static __always_inline int __put_user_fn(void *x, void __user *ptr, unsigned long size)
 {
        unsigned long spec = 0x010000UL;
        int rc;
@@ -113,7 +113,7 @@ static inline int __put_user_fn(void *x, void __user *ptr, unsigned long size)
        return rc;
 }
 
-static inline int __get_user_fn(void *x, const void __user *ptr, unsigned long size)
+static __always_inline int __get_user_fn(void *x, const void __user *ptr, unsigned long size)
 {
        unsigned long spec = 0x01UL;
        int rc;
index d827b5b9a32cd6a44d7beddba18b96cdcbb18aa9..eaaefeceef6f0810c53108b390420b351257a052 100644 (file)
@@ -35,6 +35,7 @@ struct unwind_state {
        struct task_struct *task;
        struct pt_regs *regs;
        unsigned long sp, ip;
+       bool reuse_sp;
        int graph_idx;
        bool reliable;
        bool error;
index b9d8fe45737aa2e529cb9eec244349e121803858..8f8456816d83e890a3f3150ccec184f21810a509 100644 (file)
@@ -69,18 +69,26 @@ DEVICE_ATTR(idle_count, 0444, show_idle_count, NULL);
 static ssize_t show_idle_time(struct device *dev,
                                struct device_attribute *attr, char *buf)
 {
+       unsigned long long now, idle_time, idle_enter, idle_exit, in_idle;
        struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id);
-       unsigned long long now, idle_time, idle_enter, idle_exit;
        unsigned int seq;
 
        do {
-               now = get_tod_clock();
                seq = read_seqcount_begin(&idle->seqcount);
                idle_time = READ_ONCE(idle->idle_time);
                idle_enter = READ_ONCE(idle->clock_idle_enter);
                idle_exit = READ_ONCE(idle->clock_idle_exit);
        } while (read_seqcount_retry(&idle->seqcount, seq));
-       idle_time += idle_enter ? ((idle_exit ? : now) - idle_enter) : 0;
+       in_idle = 0;
+       now = get_tod_clock();
+       if (idle_enter) {
+               if (idle_exit) {
+                       in_idle = idle_exit - idle_enter;
+               } else if (now > idle_enter) {
+                       in_idle = now - idle_enter;
+               }
+       }
+       idle_time += in_idle;
        return sprintf(buf, "%llu\n", idle_time >> 12);
 }
 DEVICE_ATTR(idle_time_us, 0444, show_idle_time, NULL);
@@ -88,17 +96,24 @@ DEVICE_ATTR(idle_time_us, 0444, show_idle_time, NULL);
 u64 arch_cpu_idle_time(int cpu)
 {
        struct s390_idle_data *idle = &per_cpu(s390_idle, cpu);
-       unsigned long long now, idle_enter, idle_exit;
+       unsigned long long now, idle_enter, idle_exit, in_idle;
        unsigned int seq;
 
        do {
-               now = get_tod_clock();
                seq = read_seqcount_begin(&idle->seqcount);
                idle_enter = READ_ONCE(idle->clock_idle_enter);
                idle_exit = READ_ONCE(idle->clock_idle_exit);
        } while (read_seqcount_retry(&idle->seqcount, seq));
-
-       return cputime_to_nsecs(idle_enter ? ((idle_exit ?: now) - idle_enter) : 0);
+       in_idle = 0;
+       now = get_tod_clock();
+       if (idle_enter) {
+               if (idle_exit) {
+                       in_idle = idle_exit - idle_enter;
+               } else if (now > idle_enter) {
+                       in_idle = now - idle_enter;
+               }
+       }
+       return cputime_to_nsecs(in_idle);
 }
 
 void arch_cpu_idle_enter(void)
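The two hunks above rework the same calculation in show_idle_time() and arch_cpu_idle_time(). As a rough sketch (distilled here for illustration; the helper name is hypothetical and not part of the patch), the corrected logic amounts to: snapshot idle_enter/idle_exit under the seqcount, read the TOD clock only afterwards, and never let a clock reading that races with idle entry produce a negative interval.

static unsigned long long idle_interval(unsigned long long idle_enter,
					unsigned long long idle_exit,
					unsigned long long now)
{
	if (!idle_enter)
		return 0;			/* CPU has not been idle yet */
	if (idle_exit)
		return idle_exit - idle_enter;	/* completed idle period */
	if (now > idle_enter)
		return now - idle_enter;	/* still idle, charge up to "now" */
	return 0;				/* "now" was read before idle entry was published */
}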
index 3b664cb3ec4d3b57ba3ca53fa2c3af1d8f92f9e1..d5035de9020e738819a06b9e0ecf74b77879ac4d 100644 (file)
@@ -27,6 +27,7 @@ int arch_kexec_do_relocs(int r_type, void *loc, unsigned long val,
                *(u32 *)loc = val;
                break;
        case R_390_64:          /* Direct 64 bit.  */
+       case R_390_GLOB_DAT:
                *(u64 *)loc = val;
                break;
        case R_390_PC16:        /* PC relative 16 bit.  */
index 5f1fd1581330fd855201ecfa44650467330fe993..2654e348801a1269998ee344df5eed1eb1658e51 100644 (file)
@@ -390,7 +390,7 @@ static size_t cf_diag_getctrset(struct cf_ctrset_entry *ctrdata, int ctrset,
 
        debug_sprintf_event(cf_diag_dbg, 6,
                            "%s ctrset %d ctrset_size %zu cfvn %d csvn %d"
-                           " need %zd rc:%d\n",
+                           " need %zd rc %d\n",
                            __func__, ctrset, ctrset_size, cpuhw->info.cfvn,
                            cpuhw->info.csvn, need, rc);
        return need;
@@ -567,7 +567,7 @@ static int cf_diag_add(struct perf_event *event, int flags)
        int err = 0;
 
        debug_sprintf_event(cf_diag_dbg, 5,
-                           "%s event %p cpu %d flags %#x cpuhw:%p\n",
+                           "%s event %p cpu %d flags %#x cpuhw %p\n",
                            __func__, event, event->cpu, flags, cpuhw);
 
        if (cpuhw->flags & PMU_F_IN_USE) {
index 544a02e944c65156aca41c4b8edffe2882862bfb..3d8b12a9a6ff44a4588ae52db2668222c2861fd2 100644 (file)
@@ -803,6 +803,12 @@ static int __hw_perf_event_init(struct perf_event *event)
                goto out;
        }
 
+       if (si.ribm & CPU_MF_SF_RIBM_NOTAV) {
+               pr_warn("CPU Measurement Facility sampling is temporarily not available\n");
+               err = -EBUSY;
+               goto out;
+       }
+
        /* Always enable basic sampling */
        SAMPL_FLAGS(hwc) = PERF_CPUM_SF_BASIC_MODE;
 
@@ -895,7 +901,7 @@ static int cpumsf_pmu_event_init(struct perf_event *event)
 
        /* Check online status of the CPU to which the event is pinned */
        if (event->cpu >= 0 && !cpu_online(event->cpu))
-                       return -ENODEV;
+               return -ENODEV;
 
        /* Force reset of idle/hv excludes regardless of what the
         * user requested.
index 8fc9daae47a2ee89a25161ff9775673ee9f37dd8..a8204f952315d9edda9090a1d3762e37cc5fb99c 100644 (file)
@@ -46,10 +46,15 @@ bool unwind_next_frame(struct unwind_state *state)
 
        regs = state->regs;
        if (unlikely(regs)) {
-               sp = READ_ONCE_NOCHECK(regs->gprs[15]);
-               if (unlikely(outside_of_stack(state, sp))) {
-                       if (!update_stack_info(state, sp))
-                               goto out_err;
+               if (state->reuse_sp) {
+                       sp = state->sp;
+                       state->reuse_sp = false;
+               } else {
+                       sp = READ_ONCE_NOCHECK(regs->gprs[15]);
+                       if (unlikely(outside_of_stack(state, sp))) {
+                               if (!update_stack_info(state, sp))
+                                       goto out_err;
+                       }
                }
                sf = (struct stack_frame *) sp;
                ip = READ_ONCE_NOCHECK(sf->gprs[8]);
@@ -107,9 +112,9 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task,
 {
        struct stack_info *info = &state->stack_info;
        unsigned long *mask = &state->stack_mask;
+       bool reliable, reuse_sp;
        struct stack_frame *sf;
        unsigned long ip;
-       bool reliable;
 
        memset(state, 0, sizeof(*state));
        state->task = task;
@@ -134,10 +139,12 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task,
        if (regs) {
                ip = READ_ONCE_NOCHECK(regs->psw.addr);
                reliable = true;
+               reuse_sp = true;
        } else {
                sf = (struct stack_frame *) sp;
                ip = READ_ONCE_NOCHECK(sf->gprs[8]);
                reliable = false;
+               reuse_sp = false;
        }
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
@@ -151,5 +158,6 @@ void __unwind_start(struct unwind_state *state, struct task_struct *task,
        state->sp = sp;
        state->ip = ip;
        state->reliable = reliable;
+       state->reuse_sp = reuse_sp;
 }
 EXPORT_SYMBOL_GPL(__unwind_start);
index f6db0f1bc86749ab5d513a153e3abbe341408db7..d047e846e1b9f2e6795e46564543f70c4233ca8a 100644 (file)
@@ -332,7 +332,7 @@ static inline int plo_test_bit(unsigned char nr)
        return cc == 0;
 }
 
-static inline void __insn32_query(unsigned int opcode, u8 query[32])
+static __always_inline void __insn32_query(unsigned int opcode, u8 *query)
 {
        register unsigned long r0 asm("0") = 0; /* query function */
        register unsigned long r1 asm("1") = (unsigned long) query;
@@ -340,9 +340,9 @@ static inline void __insn32_query(unsigned int opcode, u8 query[32])
        asm volatile(
                /* Parameter regs are ignored */
                "       .insn   rrf,%[opc] << 16,2,4,6,0\n"
-               : "=m" (*query)
+               :
                : "d" (r0), "a" (r1), [opc] "i" (opcode)
-               : "cc");
+               : "cc", "memory");
 }
 
 #define INSN_SORTL 0xb938
index 510a18299196f3b797be51fd22b2bbfbe5cd003f..a51c892f14f3ee5bd393ed74003c96fd80268979 100644 (file)
@@ -298,16 +298,16 @@ static int cmm_timeout_handler(struct ctl_table *ctl, int write,
        }
 
        if (write) {
-               len = *lenp;
-               if (copy_from_user(buf, buffer,
-                                  len > sizeof(buf) ? sizeof(buf) : len))
+               len = min(*lenp, sizeof(buf));
+               if (copy_from_user(buf, buffer, len))
                        return -EFAULT;
-               buf[sizeof(buf) - 1] = '\0';
+               buf[len - 1] = '\0';
                cmm_skip_blanks(buf, &p);
                nr = simple_strtoul(p, &p, 0);
                cmm_skip_blanks(p, &p);
                seconds = simple_strtoul(p, &p, 0);
                cmm_set_timeout(nr, seconds);
+               *ppos += *lenp;
        } else {
                len = sprintf(buf, "%ld %ld\n",
                              cmm_timeout_pages, cmm_timeout_seconds);
@@ -315,9 +315,9 @@ static int cmm_timeout_handler(struct ctl_table *ctl, int write,
                        len = *lenp;
                if (copy_to_user(buffer, buf, len))
                        return -EFAULT;
+               *lenp = len;
+               *ppos += len;
        }
-       *lenp = len;
-       *ppos += len;
        return 0;
 }
 
index 9bdff4defef199746ef19d35207eb559dade6028..e585a62d653000c99ae3fd7beeec50b00de74e12 100644 (file)
@@ -66,7 +66,7 @@ static inline int clp_get_ilp(unsigned long *ilp)
 /*
  * Call Logical Processor with c=0, the give constant lps and an lpcb request.
  */
-static inline int clp_req(void *data, unsigned int lps)
+static __always_inline int clp_req(void *data, unsigned int lps)
 {
        struct { u8 _[CLP_BLK_SIZE]; } *req = data;
        u64 ignored;
index fbc1aecf0f94ce085bc099c82c2b4a555e7a8b6c..eb24cb1afc11fbcc044583c3277b55aca9234f71 100644 (file)
@@ -29,7 +29,6 @@ config SPARC
        select RTC_DRV_M48T59
        select RTC_SYSTOHC
        select HAVE_ARCH_JUMP_LABEL if SPARC64
-       select HAVE_FAST_GUP if SPARC64
        select GENERIC_IRQ_SHOW
        select ARCH_WANT_IPC_PARSE_VERSION
        select GENERIC_PCI_IOMAP
index 612535cd97065beecb42bb59c4d2d4ca8252f6c4..6627d7c30f3700954ca655527b3490dbf28dacde 100644 (file)
@@ -1403,8 +1403,12 @@ static blk_status_t ubd_queue_rq(struct blk_mq_hw_ctx *hctx,
 
        spin_unlock_irq(&ubd_dev->lock);
 
-       if (ret < 0)
-               blk_mq_requeue_request(req, true);
+       if (ret < 0) {
+               if (ret == -ENOMEM)
+                       res = BLK_STS_RESOURCE;
+               else
+                       res = BLK_STS_DEV_RESOURCE;
+       }
 
        return res;
 }
index 149795c369f275d241ffaa7c28106bd184c99b1c..25019d42ae937eeae4798115ea9995eaaa744dda 100644 (file)
  */
 struct mem_vector immovable_mem[MAX_NUMNODES*2];
 
-/*
- * Max length of 64-bit hex address string is 19, prefix "0x" + 16 hex
- * digits, and '\0' for termination.
- */
-#define MAX_ADDR_LEN 19
-
-static acpi_physical_address get_cmdline_acpi_rsdp(void)
-{
-       acpi_physical_address addr = 0;
-
-#ifdef CONFIG_KEXEC
-       char val[MAX_ADDR_LEN] = { };
-       int ret;
-
-       ret = cmdline_find_option("acpi_rsdp", val, MAX_ADDR_LEN);
-       if (ret < 0)
-               return 0;
-
-       if (kstrtoull(val, 16, &addr))
-               return 0;
-#endif
-       return addr;
-}
-
 /*
  * Search EFI system tables for RSDP.  If both ACPI_20_TABLE_GUID and
  * ACPI_TABLE_GUID are found, take the former, which has more features.
@@ -298,6 +274,30 @@ acpi_physical_address get_rsdp_addr(void)
 }
 
 #if defined(CONFIG_RANDOMIZE_BASE) && defined(CONFIG_MEMORY_HOTREMOVE)
+/*
+ * Max length of 64-bit hex address string is 19, prefix "0x" + 16 hex
+ * digits, and '\0' for termination.
+ */
+#define MAX_ADDR_LEN 19
+
+static acpi_physical_address get_cmdline_acpi_rsdp(void)
+{
+       acpi_physical_address addr = 0;
+
+#ifdef CONFIG_KEXEC
+       char val[MAX_ADDR_LEN] = { };
+       int ret;
+
+       ret = cmdline_find_option("acpi_rsdp", val, MAX_ADDR_LEN);
+       if (ret < 0)
+               return 0;
+
+       if (kstrtoull(val, 16, &addr))
+               return 0;
+#endif
+       return addr;
+}
+
 /* Compute SRAT address from RSDP. */
 static unsigned long get_acpi_srat_table(void)
 {
index d6662fdef3001da7465dda8df8aed58a48fe6f58..82bc60c8acb240c29f925df7dde03ac724edf3b4 100644 (file)
@@ -13,6 +13,7 @@
 #include <asm/e820/types.h>
 #include <asm/setup.h>
 #include <asm/desc.h>
+#include <asm/boot.h>
 
 #include "../string.h"
 #include "eboot.h"
@@ -813,7 +814,8 @@ efi_main(struct efi_config *c, struct boot_params *boot_params)
                status = efi_relocate_kernel(sys_table, &bzimage_addr,
                                             hdr->init_size, hdr->init_size,
                                             hdr->pref_address,
-                                            hdr->kernel_alignment);
+                                            hdr->kernel_alignment,
+                                            LOAD_PHYSICAL_ADDR);
                if (status != EFI_SUCCESS) {
                        efi_printk(sys_table, "efi_relocate_kernel() failed!\n");
                        goto fail;
index 53ac0cb2396da1b336bdb6b4375fe8bd977bbb32..9652d5c2afda84d5bf2dbfa6ad67f53f855609ff 100644 (file)
@@ -345,6 +345,7 @@ asmlinkage __visible void *extract_kernel(void *rmode, memptr heap,
 {
        const unsigned long kernel_total_size = VO__end - VO__text;
        unsigned long virt_addr = LOAD_PHYSICAL_ADDR;
+       unsigned long needed_size;
 
        /* Retain x86 boot parameters pointer passed from startup_32/64. */
        boot_params = rmode;
@@ -379,26 +380,38 @@ asmlinkage __visible void *extract_kernel(void *rmode, memptr heap,
        free_mem_ptr     = heap;        /* Heap */
        free_mem_end_ptr = heap + BOOT_HEAP_SIZE;
 
+       /*
+        * The memory hole needed for the kernel is the larger of either
+        * the entire decompressed kernel plus relocation table, or the
+        * entire decompressed kernel plus .bss and .brk sections.
+        *
+        * On X86_64, the memory is mapped with PMD pages. Round the
+        * size up so that the full extent of PMD pages mapped is
+        * included in the check against the valid memory table
+        * entries. This ensures the full mapped area is usable RAM
+        * and doesn't include any reserved areas.
+        */
+       needed_size = max(output_len, kernel_total_size);
+#ifdef CONFIG_X86_64
+       needed_size = ALIGN(needed_size, MIN_KERNEL_ALIGN);
+#endif
+
        /* Report initial kernel position details. */
        debug_putaddr(input_data);
        debug_putaddr(input_len);
        debug_putaddr(output);
        debug_putaddr(output_len);
        debug_putaddr(kernel_total_size);
+       debug_putaddr(needed_size);
 
 #ifdef CONFIG_X86_64
        /* Report address of 32-bit trampoline */
        debug_putaddr(trampoline_32bit);
 #endif
 
-       /*
-        * The memory hole needed for the kernel is the larger of either
-        * the entire decompressed kernel plus relocation table, or the
-        * entire decompressed kernel plus .bss and .brk sections.
-        */
        choose_random_location((unsigned long)input_data, input_len,
                                (unsigned long *)&output,
-                               max(output_len, kernel_total_size),
+                               needed_size,
                                &virt_addr);
 
        /* Validate memory location choices. */
index e7d35f60d53fcc05e3dfef02f7af98eb84c40186..64c3e70b0556b95ebc9a4a7c18db562770ed4b68 100644 (file)
@@ -5,12 +5,14 @@
 #include <linux/init.h>
 #include <linux/slab.h>
 #include <linux/delay.h>
+#include <linux/jiffies.h>
 #include <asm/apicdef.h>
 #include <asm/nmi.h>
 
 #include "../perf_event.h"
 
-static DEFINE_PER_CPU(unsigned int, perf_nmi_counter);
+static DEFINE_PER_CPU(unsigned long, perf_nmi_tstamp);
+static unsigned long perf_nmi_window;
 
 static __initconst const u64 amd_hw_cache_event_ids
                                [PERF_COUNT_HW_CACHE_MAX]
@@ -641,11 +643,12 @@ static void amd_pmu_disable_event(struct perf_event *event)
  * handler when multiple PMCs are active or PMC overflow while handling some
  * other source of an NMI.
  *
- * Attempt to mitigate this by using the number of active PMCs to determine
- * whether to return NMI_HANDLED if the perf NMI handler did not handle/reset
- * any PMCs. The per-CPU perf_nmi_counter variable is set to a minimum of the
- * number of active PMCs or 2. The value of 2 is used in case an NMI does not
- * arrive at the LAPIC in time to be collapsed into an already pending NMI.
+ * Attempt to mitigate this by creating an NMI window in which un-handled NMIs
+ * received during this window will be claimed. This prevents extending the
+ * window past when it is possible that latent NMIs should be received. The
+ * per-CPU perf_nmi_tstamp will be set to the window end time whenever perf has
+ * handled a counter. When an un-handled NMI is received, it will be claimed
+ * only if arriving within that window.
  */
 static int amd_pmu_handle_irq(struct pt_regs *regs)
 {
@@ -663,21 +666,19 @@ static int amd_pmu_handle_irq(struct pt_regs *regs)
        handled = x86_pmu_handle_irq(regs);
 
        /*
-        * If a counter was handled, record the number of possible remaining
-        * NMIs that can occur.
+        * If a counter was handled, record a timestamp such that un-handled
+        * NMIs will be claimed if arriving within that window.
         */
        if (handled) {
-               this_cpu_write(perf_nmi_counter,
-                              min_t(unsigned int, 2, active));
+               this_cpu_write(perf_nmi_tstamp,
+                              jiffies + perf_nmi_window);
 
                return handled;
        }
 
-       if (!this_cpu_read(perf_nmi_counter))
+       if (time_after(jiffies, this_cpu_read(perf_nmi_tstamp)))
                return NMI_DONE;
 
-       this_cpu_dec(perf_nmi_counter);
-
        return NMI_HANDLED;
 }
 
@@ -909,6 +910,9 @@ static int __init amd_core_pmu_init(void)
        if (!boot_cpu_has(X86_FEATURE_PERFCTR_CORE))
                return 0;
 
+       /* Avoid calculating the value each time in the NMI handler */

+       perf_nmi_window = msecs_to_jiffies(100);
+
        switch (boot_cpu_data.x86) {
        case 0x15:
                pr_cont("Fam15h ");
index 5b35b7ea5d7282d870b7d0d2a519620793deba76..26c36357c4c9c6b809abb04d07e43aba80675bf3 100644 (file)
@@ -377,7 +377,8 @@ static inline void perf_ibs_disable_event(struct perf_ibs *perf_ibs,
                                          struct hw_perf_event *hwc, u64 config)
 {
        config &= ~perf_ibs->cnt_mask;
-       wrmsrl(hwc->config_base, config);
+       if (boot_cpu_data.x86 == 0x10)
+               wrmsrl(hwc->config_base, config);
        config &= ~perf_ibs->enable_mask;
        wrmsrl(hwc->config_base, config);
 }
@@ -553,7 +554,8 @@ static struct perf_ibs perf_ibs_op = {
        },
        .msr                    = MSR_AMD64_IBSOPCTL,
        .config_mask            = IBS_OP_CONFIG_MASK,
-       .cnt_mask               = IBS_OP_MAX_CNT,
+       .cnt_mask               = IBS_OP_MAX_CNT | IBS_OP_CUR_CNT |
+                                 IBS_OP_CUR_CNT_RAND,
        .enable_mask            = IBS_OP_ENABLE,
        .valid_mask             = IBS_OP_VAL,
        .max_period             = IBS_OP_MAX_CNT << 4,
@@ -614,7 +616,7 @@ fail:
        if (event->attr.sample_type & PERF_SAMPLE_RAW)
                offset_max = perf_ibs->offset_max;
        else if (check_rip)
-               offset_max = 2;
+               offset_max = 3;
        else
                offset_max = 1;
        do {
index 27ee47a7be6685db3c7b4d20bc0b5ef7ab7a0006..fcef678c342304ecea5bb9c29af16b9502f3cbc9 100644 (file)
@@ -4983,6 +4983,8 @@ __init int intel_pmu_init(void)
        case INTEL_FAM6_SKYLAKE:
        case INTEL_FAM6_KABYLAKE_L:
        case INTEL_FAM6_KABYLAKE:
+       case INTEL_FAM6_COMETLAKE_L:
+       case INTEL_FAM6_COMETLAKE:
                x86_add_quirk(intel_pebs_isolation_quirk);
                x86_pmu.late_ack = true;
                memcpy(hw_cache_event_ids, skl_hw_cache_event_ids, sizeof(hw_cache_event_ids));
@@ -5031,6 +5033,8 @@ __init int intel_pmu_init(void)
                /* fall through */
        case INTEL_FAM6_ICELAKE_L:
        case INTEL_FAM6_ICELAKE:
+       case INTEL_FAM6_TIGERLAKE_L:
+       case INTEL_FAM6_TIGERLAKE:
                x86_pmu.late_ack = true;
                memcpy(hw_cache_event_ids, skl_hw_cache_event_ids, sizeof(hw_cache_event_ids));
                memcpy(hw_cache_extra_regs, skl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
index 9f2f39003d96eb2a9ae3a9917627536fe2cda4e8..e1daf4151e11608bfc519562657177bfcd11755e 100644 (file)
  *     MSR_CORE_C3_RESIDENCY: CORE C3 Residency Counter
  *                            perf code: 0x01
  *                            Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,GLM,
- *                                             CNL
+ *                                             CNL,KBL,CML
  *                            Scope: Core
  *     MSR_CORE_C6_RESIDENCY: CORE C6 Residency Counter
  *                            perf code: 0x02
  *                            Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,
- *                                             SKL,KNL,GLM,CNL
+ *                                             SKL,KNL,GLM,CNL,KBL,CML,ICL,TGL
  *                            Scope: Core
  *     MSR_CORE_C7_RESIDENCY: CORE C7 Residency Counter
  *                            perf code: 0x03
- *                            Available model: SNB,IVB,HSW,BDW,SKL,CNL
+ *                            Available model: SNB,IVB,HSW,BDW,SKL,CNL,KBL,CML,
+ *                                             ICL,TGL
  *                            Scope: Core
  *     MSR_PKG_C2_RESIDENCY:  Package C2 Residency Counter.
  *                            perf code: 0x00
- *                            Available model: SNB,IVB,HSW,BDW,SKL,KNL,GLM,CNL
+ *                            Available model: SNB,IVB,HSW,BDW,SKL,KNL,GLM,CNL,
+ *                                             KBL,CML,ICL,TGL
  *                            Scope: Package (physical package)
  *     MSR_PKG_C3_RESIDENCY:  Package C3 Residency Counter.
  *                            perf code: 0x01
  *                            Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,KNL,
- *                                             GLM,CNL
+ *                                             GLM,CNL,KBL,CML,ICL,TGL
  *                            Scope: Package (physical package)
  *     MSR_PKG_C6_RESIDENCY:  Package C6 Residency Counter.
  *                            perf code: 0x02
  *                            Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW
- *                                             SKL,KNL,GLM,CNL
+ *                                             SKL,KNL,GLM,CNL,KBL,CML,ICL,TGL
  *                            Scope: Package (physical package)
  *     MSR_PKG_C7_RESIDENCY:  Package C7 Residency Counter.
  *                            perf code: 0x03
- *                            Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,CNL
+ *                            Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,CNL,
+ *                                             KBL,CML,ICL,TGL
  *                            Scope: Package (physical package)
  *     MSR_PKG_C8_RESIDENCY:  Package C8 Residency Counter.
  *                            perf code: 0x04
- *                            Available model: HSW ULT,KBL,CNL
+ *                            Available model: HSW ULT,KBL,CNL,CML,ICL,TGL
  *                            Scope: Package (physical package)
  *     MSR_PKG_C9_RESIDENCY:  Package C9 Residency Counter.
  *                            perf code: 0x05
- *                            Available model: HSW ULT,KBL,CNL
+ *                            Available model: HSW ULT,KBL,CNL,CML,ICL,TGL
  *                            Scope: Package (physical package)
  *     MSR_PKG_C10_RESIDENCY: Package C10 Residency Counter.
  *                            perf code: 0x06
- *                            Available model: HSW ULT,KBL,GLM,CNL
+ *                            Available model: HSW ULT,KBL,GLM,CNL,CML,ICL,TGL
  *                            Scope: Package (physical package)
  *
  */
@@ -544,6 +547,19 @@ static const struct cstate_model cnl_cstates __initconst = {
                                  BIT(PERF_CSTATE_PKG_C10_RES),
 };
 
+static const struct cstate_model icl_cstates __initconst = {
+       .core_events            = BIT(PERF_CSTATE_CORE_C6_RES) |
+                                 BIT(PERF_CSTATE_CORE_C7_RES),
+
+       .pkg_events             = BIT(PERF_CSTATE_PKG_C2_RES) |
+                                 BIT(PERF_CSTATE_PKG_C3_RES) |
+                                 BIT(PERF_CSTATE_PKG_C6_RES) |
+                                 BIT(PERF_CSTATE_PKG_C7_RES) |
+                                 BIT(PERF_CSTATE_PKG_C8_RES) |
+                                 BIT(PERF_CSTATE_PKG_C9_RES) |
+                                 BIT(PERF_CSTATE_PKG_C10_RES),
+};
+
 static const struct cstate_model slm_cstates __initconst = {
        .core_events            = BIT(PERF_CSTATE_CORE_C1_RES) |
                                  BIT(PERF_CSTATE_CORE_C6_RES),
@@ -614,6 +630,8 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = {
 
        X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE_L, hswult_cstates),
        X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE,   hswult_cstates),
+       X86_CSTATES_MODEL(INTEL_FAM6_COMETLAKE_L, hswult_cstates),
+       X86_CSTATES_MODEL(INTEL_FAM6_COMETLAKE, hswult_cstates),
 
        X86_CSTATES_MODEL(INTEL_FAM6_CANNONLAKE_L, cnl_cstates),
 
@@ -625,8 +643,10 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = {
 
        X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GOLDMONT_PLUS, glm_cstates),
 
-       X86_CSTATES_MODEL(INTEL_FAM6_ICELAKE_L, snb_cstates),
-       X86_CSTATES_MODEL(INTEL_FAM6_ICELAKE,   snb_cstates),
+       X86_CSTATES_MODEL(INTEL_FAM6_ICELAKE_L, icl_cstates),
+       X86_CSTATES_MODEL(INTEL_FAM6_ICELAKE,   icl_cstates),
+       X86_CSTATES_MODEL(INTEL_FAM6_TIGERLAKE_L, icl_cstates),
+       X86_CSTATES_MODEL(INTEL_FAM6_TIGERLAKE, icl_cstates),
        { },
 };
 MODULE_DEVICE_TABLE(x86cpu, intel_cstates_match);
index 74e80ed9c6c474551b57c6d8b4d548aeb3ea8e4d..05e43d0f430bcdfd89999dc3e31369b49ad0ca4c 100644 (file)
@@ -627,7 +627,7 @@ static struct topa *topa_alloc(int cpu, gfp_t gfp)
         * link as the 2nd entry in the table
         */
        if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries)) {
-               TOPA_ENTRY(&tp->topa, 1)->base = page_to_phys(p);
+               TOPA_ENTRY(&tp->topa, 1)->base = page_to_phys(p) >> TOPA_SHIFT;
                TOPA_ENTRY(&tp->topa, 1)->end = 1;
        }
 
index 6fc2e06ab4c6f9dfa45feb6f8762c97546203b2d..86467f85c383100789d0430a80ffc4989d12af03 100644 (file)
@@ -502,10 +502,8 @@ void uncore_pmu_event_start(struct perf_event *event, int flags)
        local64_set(&event->hw.prev_count, uncore_read_counter(box, event));
        uncore_enable_event(box, event);
 
-       if (box->n_active == 1) {
-               uncore_enable_box(box);
+       if (box->n_active == 1)
                uncore_pmu_start_hrtimer(box);
-       }
 }
 
 void uncore_pmu_event_stop(struct perf_event *event, int flags)
@@ -529,10 +527,8 @@ void uncore_pmu_event_stop(struct perf_event *event, int flags)
                WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
                hwc->state |= PERF_HES_STOPPED;
 
-               if (box->n_active == 0) {
-                       uncore_disable_box(box);
+               if (box->n_active == 0)
                        uncore_pmu_cancel_hrtimer(box);
-               }
        }
 
        if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
@@ -778,6 +774,40 @@ static int uncore_pmu_event_init(struct perf_event *event)
        return ret;
 }
 
+static void uncore_pmu_enable(struct pmu *pmu)
+{
+       struct intel_uncore_pmu *uncore_pmu;
+       struct intel_uncore_box *box;
+
+       uncore_pmu = container_of(pmu, struct intel_uncore_pmu, pmu);
+       if (!uncore_pmu)
+               return;
+
+       box = uncore_pmu_to_box(uncore_pmu, smp_processor_id());
+       if (!box)
+               return;
+
+       if (uncore_pmu->type->ops->enable_box)
+               uncore_pmu->type->ops->enable_box(box);
+}
+
+static void uncore_pmu_disable(struct pmu *pmu)
+{
+       struct intel_uncore_pmu *uncore_pmu;
+       struct intel_uncore_box *box;
+
+       uncore_pmu = container_of(pmu, struct intel_uncore_pmu, pmu);
+       if (!uncore_pmu)
+               return;
+
+       box = uncore_pmu_to_box(uncore_pmu, smp_processor_id());
+       if (!box)
+               return;
+
+       if (uncore_pmu->type->ops->disable_box)
+               uncore_pmu->type->ops->disable_box(box);
+}
+
 static ssize_t uncore_get_attr_cpumask(struct device *dev,
                                struct device_attribute *attr, char *buf)
 {
@@ -803,6 +833,8 @@ static int uncore_pmu_register(struct intel_uncore_pmu *pmu)
                pmu->pmu = (struct pmu) {
                        .attr_groups    = pmu->type->attr_groups,
                        .task_ctx_nr    = perf_invalid_context,
+                       .pmu_enable     = uncore_pmu_enable,
+                       .pmu_disable    = uncore_pmu_disable,
                        .event_init     = uncore_pmu_event_init,
                        .add            = uncore_pmu_event_add,
                        .del            = uncore_pmu_event_del,
index f36f7bebbc1bbaca769c9f978efda52fa9e39897..bbfdaa720b4568ded56b575d366febf6e296e319 100644 (file)
@@ -441,18 +441,6 @@ static inline int uncore_freerunning_hw_config(struct intel_uncore_box *box,
        return -EINVAL;
 }
 
-static inline void uncore_disable_box(struct intel_uncore_box *box)
-{
-       if (box->pmu->type->ops->disable_box)
-               box->pmu->type->ops->disable_box(box);
-}
-
-static inline void uncore_enable_box(struct intel_uncore_box *box)
-{
-       if (box->pmu->type->ops->enable_box)
-               box->pmu->type->ops->enable_box(box);
-}
-
 static inline void uncore_disable_event(struct intel_uncore_box *box,
                                struct perf_event *event)
 {
index b1afc77f0704481157cd4c3cafbecfbb31a86136..6f86650b3f77d73cc5dd6ebec2dbca8241a18ed2 100644 (file)
@@ -89,7 +89,14 @@ static bool test_intel(int idx, void *data)
        case INTEL_FAM6_SKYLAKE_X:
        case INTEL_FAM6_KABYLAKE_L:
        case INTEL_FAM6_KABYLAKE:
+       case INTEL_FAM6_COMETLAKE_L:
+       case INTEL_FAM6_COMETLAKE:
        case INTEL_FAM6_ICELAKE_L:
+       case INTEL_FAM6_ICELAKE:
+       case INTEL_FAM6_ICELAKE_X:
+       case INTEL_FAM6_ICELAKE_D:
+       case INTEL_FAM6_TIGERLAKE_L:
+       case INTEL_FAM6_TIGERLAKE:
                if (idx == PERF_MSR_SMI || idx == PERF_MSR_PPERF)
                        return true;
                break;
index 5c056b8aebefc83468729d0a8bba6e171914c21e..e01078e93dd34f1f4ea2e9b6e4b1174c64e947f6 100644 (file)
@@ -260,11 +260,21 @@ void __init hv_apic_init(void)
        }
 
        if (ms_hyperv.hints & HV_X64_APIC_ACCESS_RECOMMENDED) {
-               pr_info("Hyper-V: Using MSR based APIC access\n");
+               pr_info("Hyper-V: Using enlightened APIC (%s mode)",
+                       x2apic_enabled() ? "x2apic" : "xapic");
+               /*
+                * With x2apic, architectural x2apic MSRs are equivalent to the
+                * respective synthetic MSRs, so there's no need to override
+                * the apic accessors.  The only exception is
+                * hv_apic_eoi_write, because it benefits from lazy EOI when
+                * available, but it works for both xapic and x2apic modes.
+                */
                apic_set_eoi_write(hv_apic_eoi_write);
-               apic->read      = hv_apic_read;
-               apic->write     = hv_apic_write;
-               apic->icr_write = hv_apic_icr_write;
-               apic->icr_read  = hv_apic_icr_read;
+               if (!x2apic_enabled()) {
+                       apic->read      = hv_apic_read;
+                       apic->write     = hv_apic_write;
+                       apic->icr_write = hv_apic_icr_write;
+                       apic->icr_read  = hv_apic_icr_read;
+               }
        }
 }
index cff3f3f3bfe0895a4e6c78b515b86c29b54cb2ca..8348f7d69fd5862fa3f97d666443f688193a3cc2 100644 (file)
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
 
 #ifndef _ASM_X86_CPU_ENTRY_AREA_H
 #define _ASM_X86_CPU_ENTRY_AREA_H
index f04622500da396ff87f63bac499abbb3d901409d..c606c0b7073824ac70f94e1d2fe24ff91320326e 100644 (file)
@@ -83,6 +83,9 @@
 #define INTEL_FAM6_TIGERLAKE_L         0x8C
 #define INTEL_FAM6_TIGERLAKE           0x8D
 
+#define INTEL_FAM6_COMETLAKE           0xA5
+#define INTEL_FAM6_COMETLAKE_L         0xA6
+
 /* "Small Core" Processors (Atom) */
 
 #define INTEL_FAM6_ATOM_BONNELL                0x1C /* Diamondville, Pineview */
index 23edf56cf577c97bfd3719933b8f56e5be7e0f8c..24d6598dea29b2d048d41ae5c545c1366312c615 100644 (file)
@@ -219,13 +219,6 @@ enum {
                                 PFERR_WRITE_MASK |             \
                                 PFERR_PRESENT_MASK)
 
-/*
- * The mask used to denote special SPTEs, which can be either MMIO SPTEs or
- * Access Tracking SPTEs. We use bit 62 instead of bit 63 to avoid conflicting
- * with the SVE bit in EPT PTEs.
- */
-#define SPTE_SPECIAL_MASK (1ULL << 62)
-
 /* apic attention bits */
 #define KVM_APIC_CHECK_VAPIC   0
 /*
@@ -1196,7 +1189,7 @@ struct kvm_x86_ops {
        int (*set_nested_state)(struct kvm_vcpu *vcpu,
                                struct kvm_nested_state __user *user_kvm_nested_state,
                                struct kvm_nested_state *kvm_state);
-       void (*get_vmcs12_pages)(struct kvm_vcpu *vcpu);
+       bool (*get_vmcs12_pages)(struct kvm_vcpu *vcpu);
 
        int (*smi_allowed)(struct kvm_vcpu *vcpu);
        int (*pre_enter_smm)(struct kvm_vcpu *vcpu, char *smstate);
index e28f8b723b5c17d4d26f649f84c89781cdbbf738..9d5252c9685c6a555215bcf769855bd70fc776d5 100644 (file)
@@ -21,7 +21,7 @@
 #define MWAIT_ECX_INTERRUPT_BREAK      0x1
 #define MWAITX_ECX_TIMER_ENABLE                BIT(1)
 #define MWAITX_MAX_LOOPS               ((u32)-1)
-#define MWAITX_DISABLE_CSTATES         0xf
+#define MWAITX_DISABLE_CSTATES         0xf0
 
 static inline void __monitor(const void *eax, unsigned long ecx,
                             unsigned long edx)
index 5df09a0b80b801bcd6cc9adc780dec594d84ccad..07375b476c4fda6cfd89229826bf21e331ef5190 100644 (file)
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
 #ifndef _ASM_X86_PTI_H
 #define _ASM_X86_PTI_H
 #ifndef __ASSEMBLY__
index 35c225ede0e4fb55476c94b6055325a64486d534..61d93f062a36e0a567a69ee29ee7337deefa5e67 100644 (file)
@@ -734,5 +734,28 @@ do {                                                                               \
        if (unlikely(__gu_err)) goto err_label;                                 \
 } while (0)
 
+/*
+ * We want the unsafe accessors to always be inlined and use
+ * the error labels - thus the macro games.
+ */
+#define unsafe_copy_loop(dst, src, len, type, label)                   \
+       while (len >= sizeof(type)) {                                   \
+               unsafe_put_user(*(type *)src,(type __user *)dst,label); \
+               dst += sizeof(type);                                    \
+               src += sizeof(type);                                    \
+               len -= sizeof(type);                                    \
+       }
+
+#define unsafe_copy_to_user(_dst,_src,_len,label)                      \
+do {                                                                   \
+       char __user *__ucu_dst = (_dst);                                \
+       const char *__ucu_src = (_src);                                 \
+       size_t __ucu_len = (_len);                                      \
+       unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u64, label);  \
+       unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u32, label);  \
+       unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u16, label);  \
+       unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u8, label);   \
+} while (0)
+
 #endif /* _ASM_X86_UACCESS_H */
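As a usage sketch for the new macro (the wrapper below is hypothetical and not part of the patch): calls are meant to be bracketed by user_access_begin()/user_access_end(), with the supplied label catching a faulting access so the access-enable state stays balanced on both paths.

static int copy_record_to_user(void __user *dst, const void *src, size_t len)
{
	if (!user_access_begin(dst, len))
		return -EFAULT;
	unsafe_copy_to_user(dst, src, len, Efault);
	user_access_end();
	return 0;
Efault:
	user_access_end();
	return -EFAULT;
}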
 
index e00c9e87593327e781d72ff9c3b5a2545fc986c0..ac9fc51e2b1862f5dff7264a1978f3d399b5c847 100644 (file)
@@ -4,6 +4,7 @@
 
 #include <asm/cpufeatures.h>
 #include <asm/alternative.h>
+#include <linux/stringify.h>
 
 /*
  * The hypercall definitions differ in the low word of the %edx argument
@@ -20,8 +21,8 @@
  */
 
 /* Old port-based version */
-#define VMWARE_HYPERVISOR_PORT    "0x5658"
-#define VMWARE_HYPERVISOR_PORT_HB "0x5659"
+#define VMWARE_HYPERVISOR_PORT    0x5658
+#define VMWARE_HYPERVISOR_PORT_HB 0x5659
 
 /* Current vmcall / vmmcall version */
 #define VMWARE_HYPERVISOR_HB   BIT(0)
@@ -29,7 +30,8 @@
 
 /* The low bandwidth call. The low word of edx is presumed clear. */
 #define VMWARE_HYPERCALL                                               \
-       ALTERNATIVE_2("movw $" VMWARE_HYPERVISOR_PORT ", %%dx; inl (%%dx)", \
+       ALTERNATIVE_2("movw $" __stringify(VMWARE_HYPERVISOR_PORT) ", %%dx; " \
+                     "inl (%%dx), %%eax",                              \
                      "vmcall", X86_FEATURE_VMCALL,                     \
                      "vmmcall", X86_FEATURE_VMW_VMMCALL)
 
@@ -38,7 +40,8 @@
  * HB and OUT bits set.
  */
 #define VMWARE_HYPERCALL_HB_OUT                                                \
-       ALTERNATIVE_2("movw $" VMWARE_HYPERVISOR_PORT_HB ", %%dx; rep outsb", \
+       ALTERNATIVE_2("movw $" __stringify(VMWARE_HYPERVISOR_PORT_HB) ", %%dx; " \
+                     "rep outsb",                                      \
                      "vmcall", X86_FEATURE_VMCALL,                     \
                      "vmmcall", X86_FEATURE_VMW_VMMCALL)
 
@@ -47,7 +50,8 @@
  * HB bit set.
  */
 #define VMWARE_HYPERCALL_HB_IN                                         \
-       ALTERNATIVE_2("movw $" VMWARE_HYPERVISOR_PORT_HB ", %%dx; rep insb", \
+       ALTERNATIVE_2("movw $" __stringify(VMWARE_HYPERVISOR_PORT_HB) ", %%dx; " \
+                     "rep insb",                                       \
                      "vmcall", X86_FEATURE_VMCALL,                     \
                      "vmmcall", X86_FEATURE_VMW_VMMCALL)
 #endif
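A minimal illustration of why the port constants become plain integers (the names below are made up, not from the patch): with __stringify() a single numeric definition serves both as an ordinary C expression and as text pasted into an inline-asm template, instead of keeping a separate string-literal macro.

#include <linux/stringify.h>
#include <linux/types.h>

#define EXAMPLE_PORT 0x5658

/* usable as an ordinary integer in C ... */
static inline bool is_example_port(unsigned int port)
{
	return port == EXAMPLE_PORT;
}

/* ... and as text inside an asm template */
#define LOAD_EXAMPLE_PORT_ASM \
	"movw $" __stringify(EXAMPLE_PORT) ", %%dx"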
index 45e92cba92f52cb1627e7dc270d96abf1b0e491e..b0889c48a2ac5e02e2ff64c53b1b1f8e25cef7eb 100644 (file)
@@ -156,7 +156,8 @@ static int x2apic_dead_cpu(unsigned int dead_cpu)
 {
        struct cluster_mask *cmsk = per_cpu(cluster_masks, dead_cpu);
 
-       cpumask_clear_cpu(dead_cpu, &cmsk->mask);
+       if (cmsk)
+               cpumask_clear_cpu(dead_cpu, &cmsk->mask);
        free_cpumask_var(per_cpu(ipi_mask, dead_cpu));
        return 0;
 }
index 267daad8c0360890672e7db5369fde36fb58174e..c656d92cd708f8aac790968e6b38714aacdf6ecc 100644 (file)
@@ -216,6 +216,10 @@ static void __init ms_hyperv_init_platform(void)
        int hv_host_info_ecx;
        int hv_host_info_edx;
 
+#ifdef CONFIG_PARAVIRT
+       pv_info.name = "Hyper-V";
+#endif
+
        /*
         * Extract the features and hints
         */
index 9735139cfdf86bcdaee7ea29a3ba7ab9a6d99270..46d732696c1c8b6c94652ce7e7b76e1284d50216 100644 (file)
@@ -49,7 +49,7 @@
 #define VMWARE_CMD_VCPU_RESERVED 31
 
 #define VMWARE_PORT(cmd, eax, ebx, ecx, edx)                           \
-       __asm__("inl (%%dx)" :                                          \
+       __asm__("inl (%%dx), %%eax" :                                   \
                "=a"(eax), "=c"(ecx), "=d"(edx), "=b"(ebx) :            \
                "a"(VMWARE_HYPERVISOR_MAGIC),                           \
                "c"(VMWARE_CMD_##cmd),                                  \
index 29ffa495bd1cb7333b77e33aa80a055ec0ec6d69..206a4b6144c2e1d9a0780a82424c4817bf390d7d 100644 (file)
@@ -222,13 +222,31 @@ unsigned long __head __startup_64(unsigned long physaddr,
         * we might write invalid pmds, when the kernel is relocated
         * cleanup_highmap() fixes this up along with the mappings
         * beyond _end.
+        *
+        * Only the region occupied by the kernel image has so far
+        * been checked against the table of usable memory regions
+        * provided by the firmware, so invalidate pages outside that
+        * region. A page table entry that maps to a reserved area of
+        * memory would allow processor speculation into that area,
+        * and on some hardware (particularly the UV platform) even
+        * speculative access to some reserved areas is caught as an
+        * error, causing the BIOS to halt the system.
         */
 
        pmd = fixup_pointer(level2_kernel_pgt, physaddr);
-       for (i = 0; i < PTRS_PER_PMD; i++) {
+
+       /* invalidate pages before the kernel image */
+       for (i = 0; i < pmd_index((unsigned long)_text); i++)
+               pmd[i] &= ~_PAGE_PRESENT;
+
+       /* fixup pages that are part of the kernel image */
+       for (; i <= pmd_index((unsigned long)_end); i++)
                if (pmd[i] & _PAGE_PRESENT)
                        pmd[i] += load_delta;
-       }
+
+       /* invalidate pages after the kernel image */
+       for (; i < PTRS_PER_PMD; i++)
+               pmd[i] &= ~_PAGE_PRESENT;
 
        /*
         * Fixup phys_base - remove the memory encryption mask to obtain
index 320ab978fb1f3149b314beb8003c5b224490e7ca..1d0797b2338a291a9ba6ec89f0ec7aead1b4fbd0 100644 (file)
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
 //
 // Code shared between 32 and 64 bit
 
index 63316036f85a01cc22bcadca11fe9301d35f7a65..f68c0c753c38f6fd29703f98b0646f1372969407 100644 (file)
@@ -363,7 +363,7 @@ static inline void do_cpuid_7_mask(struct kvm_cpuid_entry2 *entry, int index)
 
        /* cpuid 7.0.ecx*/
        const u32 kvm_cpuid_7_0_ecx_x86_features =
-               F(AVX512VBMI) | F(LA57) | F(PKU) | 0 /*OSPKE*/ |
+               F(AVX512VBMI) | F(LA57) | F(PKU) | 0 /*OSPKE*/ | F(RDPID) |
                F(AVX512_VPOPCNTDQ) | F(UMIP) | F(AVX512_VBMI2) | F(GFNI) |
                F(VAES) | F(VPCLMULQDQ) | F(AVX512_VNNI) | F(AVX512_BITALG) |
                F(CLDEMOTE) | F(MOVDIRI) | F(MOVDIR64B) | 0 /*WAITPKG*/;
@@ -485,6 +485,7 @@ static inline int __do_cpuid_func(struct kvm_cpuid_entry2 *entry, u32 function,
 
        /* cpuid 0x80000008.ebx */
        const u32 kvm_cpuid_8000_0008_ebx_x86_features =
+               F(CLZERO) | F(XSAVEERPTR) |
                F(WBNOINVD) | F(AMD_IBPB) | F(AMD_IBRS) | F(AMD_SSBD) | F(VIRT_SSBD) |
                F(AMD_SSB_NO) | F(AMD_STIBP) | F(AMD_STIBP_ALWAYS_ON);
 
@@ -618,16 +619,20 @@ static inline int __do_cpuid_func(struct kvm_cpuid_entry2 *entry, u32 function,
         */
        case 0x1f:
        case 0xb: {
-               int i, level_type;
+               int i;
 
-               /* read more entries until level_type is zero */
-               for (i = 1; ; ++i) {
+               /*
+                * We filled in entry[0] for CPUID(EAX=<function>,
+                * ECX=00H) above.  If its level type (ECX[15:8]) is
+                * zero, then the leaf is unimplemented, and we're
+                * done.  Otherwise, continue to populate entries
+                * until the level type (ECX[15:8]) of the previously
+                * added entry is zero.
+                */
+               for (i = 1; entry[i - 1].ecx & 0xff00; ++i) {
                        if (*nent >= maxnent)
                                goto out;
 
-                       level_type = entry[i - 1].ecx & 0xff00;
-                       if (!level_type)
-                               break;
                        do_host_cpuid(&entry[i], function, i);
                        ++*nent;
                }
@@ -969,53 +974,66 @@ struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
 EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry);
 
 /*
- * If no match is found, check whether we exceed the vCPU's limit
- * and return the content of the highest valid _standard_ leaf instead.
- * This is to satisfy the CPUID specification.
+ * If the basic or extended CPUID leaf requested is higher than the
+ * maximum supported basic or extended leaf, respectively, then it is
+ * out of range.
  */
-static struct kvm_cpuid_entry2* check_cpuid_limit(struct kvm_vcpu *vcpu,
-                                                  u32 function, u32 index)
+static bool cpuid_function_in_range(struct kvm_vcpu *vcpu, u32 function)
 {
-       struct kvm_cpuid_entry2 *maxlevel;
-
-       maxlevel = kvm_find_cpuid_entry(vcpu, function & 0x80000000, 0);
-       if (!maxlevel || maxlevel->eax >= function)
-               return NULL;
-       if (function & 0x80000000) {
-               maxlevel = kvm_find_cpuid_entry(vcpu, 0, 0);
-               if (!maxlevel)
-                       return NULL;
-       }
-       return kvm_find_cpuid_entry(vcpu, maxlevel->eax, index);
+       struct kvm_cpuid_entry2 *max;
+
+       max = kvm_find_cpuid_entry(vcpu, function & 0x80000000, 0);
+       return max && function <= max->eax;
 }
 
 bool kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx,
               u32 *ecx, u32 *edx, bool check_limit)
 {
        u32 function = *eax, index = *ecx;
-       struct kvm_cpuid_entry2 *best;
-       bool entry_found = true;
-
-       best = kvm_find_cpuid_entry(vcpu, function, index);
-
-       if (!best) {
-               entry_found = false;
-               if (!check_limit)
-                       goto out;
+       struct kvm_cpuid_entry2 *entry;
+       struct kvm_cpuid_entry2 *max;
+       bool found;
 
-               best = check_cpuid_limit(vcpu, function, index);
+       entry = kvm_find_cpuid_entry(vcpu, function, index);
+       found = entry;
+       /*
+        * Intel CPUID semantics treats any query for an out-of-range
+        * leaf as if the highest basic leaf (i.e. CPUID.0H:EAX) were
+        * requested. AMD CPUID semantics returns all zeroes for any
+        * undefined leaf, whether or not the leaf is in range.
+        */
+       if (!entry && check_limit && !guest_cpuid_is_amd(vcpu) &&
+           !cpuid_function_in_range(vcpu, function)) {
+               max = kvm_find_cpuid_entry(vcpu, 0, 0);
+               if (max) {
+                       function = max->eax;
+                       entry = kvm_find_cpuid_entry(vcpu, function, index);
+               }
        }
-
-out:
-       if (best) {
-               *eax = best->eax;
-               *ebx = best->ebx;
-               *ecx = best->ecx;
-               *edx = best->edx;
-       } else
+       if (entry) {
+               *eax = entry->eax;
+               *ebx = entry->ebx;
+               *ecx = entry->ecx;
+               *edx = entry->edx;
+       } else {
                *eax = *ebx = *ecx = *edx = 0;
-       trace_kvm_cpuid(function, *eax, *ebx, *ecx, *edx, entry_found);
-       return entry_found;
+               /*
+                * When leaf 0BH or 1FH is defined, CL is pass-through
+                * and EDX is always the x2APIC ID, even for undefined
+                * subleaves. Index 1 will exist iff the leaf is
+                * implemented, so we pass through CL iff leaf 1
+                * exists. EDX can be copied from any existing index.
+                */
+               if (function == 0xb || function == 0x1f) {
+                       entry = kvm_find_cpuid_entry(vcpu, function, 1);
+                       if (entry) {
+                               *ecx = index & 0xff;
+                               *edx = entry->edx;
+                       }
+               }
+       }
+       trace_kvm_cpuid(function, *eax, *ebx, *ecx, *edx, found);
+       return found;
 }
 EXPORT_SYMBOL_GPL(kvm_cpuid);
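A hedged illustration of the range check introduced above (not part of the patch): masking the requested leaf with 0x80000000 selects the leaf whose EAX reports that range's maximum, 0x0 for the basic range and 0x80000000 for the extended range. For Intel-style guests an out-of-range basic request then falls back to the highest basic leaf; AMD-style guests simply get zeroes.

static u32 cpuid_range_base(u32 function)
{
	/* 0x00000000 (basic) or 0x80000000 (extended); CPUID at this
	 * leaf reports the range's maximum implemented leaf in EAX. */
	return function & 0x80000000;
}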
 
index 3a3a6854dccaeed071f4fa4a3800a3d002c06269..b29d00b661ff2155a3530bd6eeb094287a981f37 100644 (file)
 #define X2APIC_BROADCAST               0xFFFFFFFFul
 
 static bool lapic_timer_advance_dynamic __read_mostly;
-#define LAPIC_TIMER_ADVANCE_ADJUST_MIN 100
-#define LAPIC_TIMER_ADVANCE_ADJUST_MAX 5000
-#define LAPIC_TIMER_ADVANCE_ADJUST_INIT 1000
+#define LAPIC_TIMER_ADVANCE_ADJUST_MIN 100     /* clock cycles */
+#define LAPIC_TIMER_ADVANCE_ADJUST_MAX 10000   /* clock cycles */
+#define LAPIC_TIMER_ADVANCE_NS_INIT    1000
+#define LAPIC_TIMER_ADVANCE_NS_MAX     5000
 /* step-by-step approximation to mitigate fluctuation */
 #define LAPIC_TIMER_ADVANCE_ADJUST_STEP 8
 
@@ -110,11 +111,6 @@ static inline int apic_enabled(struct kvm_lapic *apic)
        (LVT_MASK | APIC_MODE_MASK | APIC_INPUT_POLARITY | \
         APIC_LVT_REMOTE_IRR | APIC_LVT_LEVEL_TRIGGER)
 
-static inline u8 kvm_xapic_id(struct kvm_lapic *apic)
-{
-       return kvm_lapic_get_reg(apic, APIC_ID) >> 24;
-}
-
 static inline u32 kvm_x2apic_id(struct kvm_lapic *apic)
 {
        return apic->vcpu->vcpu_id;
@@ -1504,8 +1500,8 @@ static inline void adjust_lapic_timer_advance(struct kvm_vcpu *vcpu,
                timer_advance_ns += ns/LAPIC_TIMER_ADVANCE_ADJUST_STEP;
        }
 
-       if (unlikely(timer_advance_ns > LAPIC_TIMER_ADVANCE_ADJUST_MAX))
-               timer_advance_ns = LAPIC_TIMER_ADVANCE_ADJUST_INIT;
+       if (unlikely(timer_advance_ns > LAPIC_TIMER_ADVANCE_NS_MAX))
+               timer_advance_ns = LAPIC_TIMER_ADVANCE_NS_INIT;
        apic->lapic_timer.timer_advance_ns = timer_advance_ns;
 }
 
@@ -2302,7 +2298,7 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu, int timer_advance_ns)
                     HRTIMER_MODE_ABS_HARD);
        apic->lapic_timer.timer.function = apic_timer_fn;
        if (timer_advance_ns == -1) {
-               apic->lapic_timer.timer_advance_ns = LAPIC_TIMER_ADVANCE_ADJUST_INIT;
+               apic->lapic_timer.timer_advance_ns = LAPIC_TIMER_ADVANCE_NS_INIT;
                lapic_timer_advance_dynamic = true;
        } else {
                apic->lapic_timer.timer_advance_ns = timer_advance_ns;
index 2aad7e226fc016e130f29746f4fa2798fb811126..1f5014852e208e1c38c9d0c7c57fa0ae8fc6d110 100644 (file)
@@ -242,4 +242,9 @@ static inline enum lapic_mode kvm_apic_mode(u64 apic_base)
        return apic_base & (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE);
 }
 
+static inline u8 kvm_xapic_id(struct kvm_lapic *apic)
+{
+       return kvm_lapic_get_reg(apic, APIC_ID) >> 24;
+}
+
 #endif
index 5269aa057dfa609341b4afa5ac9bb3ada7748855..24c23c66b2263c5b20b86119c192711006e7e189 100644 (file)
@@ -83,7 +83,17 @@ module_param(dbg, bool, 0644);
 #define PTE_PREFETCH_NUM               8
 
 #define PT_FIRST_AVAIL_BITS_SHIFT 10
-#define PT64_SECOND_AVAIL_BITS_SHIFT 52
+#define PT64_SECOND_AVAIL_BITS_SHIFT 54
+
+/*
+ * The mask used to denote special SPTEs, which can be either MMIO SPTEs or
+ * Access Tracking SPTEs.
+ */
+#define SPTE_SPECIAL_MASK (3ULL << 52)
+#define SPTE_AD_ENABLED_MASK (0ULL << 52)
+#define SPTE_AD_DISABLED_MASK (1ULL << 52)
+#define SPTE_AD_WRPROT_ONLY_MASK (2ULL << 52)
+#define SPTE_MMIO_MASK (3ULL << 52)
 
 #define PT64_LEVEL_BITS 9
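A small decoder over the new masks, as an illustration only (the helper name is invented, not from the patch):

static const char *spte_special_kind(u64 spte)
{
	switch (spte & SPTE_SPECIAL_MASK) {
	case SPTE_AD_ENABLED_MASK:	return "A/D bits in use";
	case SPTE_AD_DISABLED_MASK:	return "access-tracking (A/D disabled)";
	case SPTE_AD_WRPROT_ONLY_MASK:	return "A/D in use, dirty log via write protection";
	default:			return "MMIO";	/* SPTE_MMIO_MASK */
	}
}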
 
@@ -219,12 +229,11 @@ static u64 __read_mostly shadow_present_mask;
 static u64 __read_mostly shadow_me_mask;
 
 /*
- * SPTEs used by MMUs without A/D bits are marked with shadow_acc_track_value.
- * Non-present SPTEs with shadow_acc_track_value set are in place for access
- * tracking.
+ * SPTEs used by MMUs without A/D bits are marked with SPTE_AD_DISABLED_MASK;
+ * shadow_acc_track_mask is the set of bits to be cleared in non-accessed
+ * pages.
  */
 static u64 __read_mostly shadow_acc_track_mask;
-static const u64 shadow_acc_track_value = SPTE_SPECIAL_MASK;
 
 /*
  * The mask/shift to use for saving the original R/X bits when marking the PTE
@@ -304,7 +313,7 @@ void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask, u64 mmio_value, u64 access_mask)
 {
        BUG_ON((u64)(unsigned)access_mask != access_mask);
        BUG_ON((mmio_mask & mmio_value) != mmio_value);
-       shadow_mmio_value = mmio_value | SPTE_SPECIAL_MASK;
+       shadow_mmio_value = mmio_value | SPTE_MMIO_MASK;
        shadow_mmio_mask = mmio_mask | SPTE_SPECIAL_MASK;
        shadow_mmio_access_mask = access_mask;
 }
@@ -320,10 +329,27 @@ static inline bool sp_ad_disabled(struct kvm_mmu_page *sp)
        return sp->role.ad_disabled;
 }
 
+static inline bool kvm_vcpu_ad_need_write_protect(struct kvm_vcpu *vcpu)
+{
+       /*
+        * When using the EPT page-modification log, the GPAs in the log
+        * would come from L2 rather than L1.  Therefore, we need to rely
+        * on write protection to record dirty pages.  This also bypasses
+        * PML, since writes now result in a vmexit.
+        */
+       return vcpu->arch.mmu == &vcpu->arch.guest_mmu;
+}
+
 static inline bool spte_ad_enabled(u64 spte)
 {
        MMU_WARN_ON(is_mmio_spte(spte));
-       return !(spte & shadow_acc_track_value);
+       return (spte & SPTE_SPECIAL_MASK) != SPTE_AD_DISABLED_MASK;
+}
+
+static inline bool spte_ad_need_write_protect(u64 spte)
+{
+       MMU_WARN_ON(is_mmio_spte(spte));
+       return (spte & SPTE_SPECIAL_MASK) != SPTE_AD_ENABLED_MASK;
 }
 
 static inline u64 spte_shadow_accessed_mask(u64 spte)
@@ -461,7 +487,7 @@ void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
 {
        BUG_ON(!dirty_mask != !accessed_mask);
        BUG_ON(!accessed_mask && !acc_track_mask);
-       BUG_ON(acc_track_mask & shadow_acc_track_value);
+       BUG_ON(acc_track_mask & SPTE_SPECIAL_MASK);
 
        shadow_user_mask = user_mask;
        shadow_accessed_mask = accessed_mask;
@@ -1589,16 +1615,16 @@ static bool spte_clear_dirty(u64 *sptep)
 
        rmap_printk("rmap_clear_dirty: spte %p %llx\n", sptep, *sptep);
 
+       MMU_WARN_ON(!spte_ad_enabled(spte));
        spte &= ~shadow_dirty_mask;
-
        return mmu_spte_update(sptep, spte);
 }
 
-static bool wrprot_ad_disabled_spte(u64 *sptep)
+static bool spte_wrprot_for_clear_dirty(u64 *sptep)
 {
        bool was_writable = test_and_clear_bit(PT_WRITABLE_SHIFT,
                                               (unsigned long *)sptep);
-       if (was_writable)
+       if (was_writable && !spte_ad_enabled(*sptep))
                kvm_set_pfn_dirty(spte_to_pfn(*sptep));
 
        return was_writable;
@@ -1617,10 +1643,10 @@ static bool __rmap_clear_dirty(struct kvm *kvm, struct kvm_rmap_head *rmap_head)
        bool flush = false;
 
        for_each_rmap_spte(rmap_head, &iter, sptep)
-               if (spte_ad_enabled(*sptep))
-                       flush |= spte_clear_dirty(sptep);
+               if (spte_ad_need_write_protect(*sptep))
+                       flush |= spte_wrprot_for_clear_dirty(sptep);
                else
-                       flush |= wrprot_ad_disabled_spte(sptep);
+                       flush |= spte_clear_dirty(sptep);
 
        return flush;
 }
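Reading the rewritten dispatch together with the new predicates: SPTEs marked SPTE_AD_ENABLED_MASK have their hardware dirty bit cleared via spte_clear_dirty(), while SPTEs marked SPTE_AD_DISABLED_MASK or SPTE_AD_WRPROT_ONLY_MASK satisfy spte_ad_need_write_protect() and are write-protected via spte_wrprot_for_clear_dirty() instead; kvm_set_pfn_dirty() is only invoked for the A/D-disabled access-tracking case. This summary restates the hunks above, it does not add behavior.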
@@ -1631,6 +1657,11 @@ static bool spte_set_dirty(u64 *sptep)
 
        rmap_printk("rmap_set_dirty: spte %p %llx\n", sptep, *sptep);
 
+       /*
+        * Similar to the !kvm_x86_ops->slot_disable_log_dirty case,
+        * do not bother adding back write access to pages marked
+        * SPTE_AD_WRPROT_ONLY_MASK.
+        */
        spte |= shadow_dirty_mask;
 
        return mmu_spte_update(sptep, spte);
@@ -2622,7 +2653,7 @@ static void link_shadow_page(struct kvm_vcpu *vcpu, u64 *sptep,
               shadow_user_mask | shadow_x_mask | shadow_me_mask;
 
        if (sp_ad_disabled(sp))
-               spte |= shadow_acc_track_value;
+               spte |= SPTE_AD_DISABLED_MASK;
        else
                spte |= shadow_accessed_mask;
 
@@ -2968,7 +2999,9 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 
        sp = page_header(__pa(sptep));
        if (sp_ad_disabled(sp))
-               spte |= shadow_acc_track_value;
+               spte |= SPTE_AD_DISABLED_MASK;
+       else if (kvm_vcpu_ad_need_write_protect(vcpu))
+               spte |= SPTE_AD_WRPROT_ONLY_MASK;
 
        /*
         * For the EPT case, shadow_present_mask is 0 if hardware
index f8ecb6df5106641517a75380e4755dfa65967f08..c5673bda4b66df5e7672abc78e5bf520b3c3361c 100644 (file)
@@ -734,8 +734,14 @@ static int get_npt_level(struct kvm_vcpu *vcpu)
 static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
 {
        vcpu->arch.efer = efer;
-       if (!npt_enabled && !(efer & EFER_LMA))
-               efer &= ~EFER_LME;
+
+       if (!npt_enabled) {
+               /* Shadow paging assumes NX to be available.  */
+               efer |= EFER_NX;
+
+               if (!(efer & EFER_LMA))
+                       efer &= ~EFER_LME;
+       }
 
        to_svm(vcpu)->vmcb->save.efer = efer | EFER_SVME;
        mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
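The NX part of this SVM hunk is mirrored by the VMX hunk later in the series. A hedged sketch of the rule being enforced, with tdp_enabled standing in for npt_enabled/enable_ept (the names here are illustrative, not from the patch):

static u64 efer_seen_by_hardware(u64 guest_efer, bool tdp_enabled)
{
	u64 efer = guest_efer;

	if (!tdp_enabled) {
		/* The shadow MMU relies on being able to set NX in shadow
		 * PTEs, even if the guest leaves EFER.NX clear. */
		efer |= EFER_NX;
		if (!(efer & EFER_LMA))
			efer &= ~EFER_LME;
	}
	return efer;
}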
@@ -4591,6 +4597,7 @@ static int avic_handle_ldr_update(struct kvm_vcpu *vcpu)
        int ret = 0;
        struct vcpu_svm *svm = to_svm(vcpu);
        u32 ldr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_LDR);
+       u32 id = kvm_xapic_id(vcpu->arch.apic);
 
        if (ldr == svm->ldr_reg)
                return 0;
@@ -4598,7 +4605,7 @@ static int avic_handle_ldr_update(struct kvm_vcpu *vcpu)
        avic_invalidate_logical_id_entry(vcpu);
 
        if (ldr)
-               ret = avic_ldr_write(vcpu, vcpu->vcpu_id, ldr);
+               ret = avic_ldr_write(vcpu, id, ldr);
 
        if (!ret)
                svm->ldr_reg = ldr;
@@ -4610,8 +4617,7 @@ static int avic_handle_apic_id_update(struct kvm_vcpu *vcpu)
 {
        u64 *old, *new;
        struct vcpu_svm *svm = to_svm(vcpu);
-       u32 apic_id_reg = kvm_lapic_get_reg(vcpu->arch.apic, APIC_ID);
-       u32 id = (apic_id_reg >> 24) & 0xff;
+       u32 id = kvm_xapic_id(vcpu->arch.apic);
 
        if (vcpu->vcpu_id == id)
                return 0;
index 41abc62c9a8a1b3b893dce684af9e0bed8f3613a..0e7c9301fe86549e42bb2959e4408299230fb82b 100644 (file)
@@ -2610,7 +2610,7 @@ static int nested_check_vm_entry_controls(struct kvm_vcpu *vcpu,
 
                /* VM-entry exception error code */
                if (CC(has_error_code &&
-                      vmcs12->vm_entry_exception_error_code & GENMASK(31, 15)))
+                      vmcs12->vm_entry_exception_error_code & GENMASK(31, 16)))
                        return -EINVAL;
 
                /* VM-entry interruption-info field: reserved bits */
@@ -2917,7 +2917,7 @@ static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu)
 static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
                                                 struct vmcs12 *vmcs12);
 
-static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
+static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
 {
        struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
        struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -2937,19 +2937,18 @@ static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
                        vmx->nested.apic_access_page = NULL;
                }
                page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->apic_access_addr);
-               /*
-                * If translation failed, no matter: This feature asks
-                * to exit when accessing the given address, and if it
-                * can never be accessed, this feature won't do
-                * anything anyway.
-                */
                if (!is_error_page(page)) {
                        vmx->nested.apic_access_page = page;
                        hpa = page_to_phys(vmx->nested.apic_access_page);
                        vmcs_write64(APIC_ACCESS_ADDR, hpa);
                } else {
-                       secondary_exec_controls_clearbit(vmx,
-                               SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES);
+                       pr_debug_ratelimited("%s: no backing 'struct page' for APIC-access address in vmcs12\n",
+                                            __func__);
+                       vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+                       vcpu->run->internal.suberror =
+                               KVM_INTERNAL_ERROR_EMULATION;
+                       vcpu->run->internal.ndata = 0;
+                       return false;
                }
        }
 
@@ -2994,6 +2993,7 @@ static void nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
                exec_controls_setbit(vmx, CPU_BASED_USE_MSR_BITMAPS);
        else
                exec_controls_clearbit(vmx, CPU_BASED_USE_MSR_BITMAPS);
+       return true;
 }
 
 /*
@@ -3032,13 +3032,15 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
 /*
  * If from_vmentry is false, this is being called from state restore (either RSM
  * or KVM_SET_NESTED_STATE).  Otherwise it's called from vmlaunch/vmresume.
-+ *
-+ * Returns:
-+ *   0 - success, i.e. proceed with actual VMEnter
-+ *   1 - consistency check VMExit
-+ *  -1 - consistency check VMFail
+ *
+ * Returns:
+ *     NVMX_ENTRY_SUCCESS: Entered VMX non-root mode
+ *     NVMX_ENTRY_VMFAIL:  Consistency check VMFail
+ *     NVMX_ENTRY_VMEXIT:  Consistency check VMExit
+ *     NVMX_ENTRY_KVM_INTERNAL_ERROR: KVM internal error
  */
-int nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu, bool from_vmentry)
+enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
+                                                       bool from_vmentry)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
@@ -3081,11 +3083,12 @@ int nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu, bool from_vmentry)
        prepare_vmcs02_early(vmx, vmcs12);
 
        if (from_vmentry) {
-               nested_get_vmcs12_pages(vcpu);
+               if (unlikely(!nested_get_vmcs12_pages(vcpu)))
+                       return NVMX_VMENTRY_KVM_INTERNAL_ERROR;
 
                if (nested_vmx_check_vmentry_hw(vcpu)) {
                        vmx_switch_vmcs(vcpu, &vmx->vmcs01);
-                       return -1;
+                       return NVMX_VMENTRY_VMFAIL;
                }
 
                if (nested_vmx_check_guest_state(vcpu, vmcs12, &exit_qual))
@@ -3149,7 +3152,7 @@ int nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu, bool from_vmentry)
         * returned as far as L1 is concerned. It will only return (and set
         * the success flag) when L2 exits (see nested_vmx_vmexit()).
         */
-       return 0;
+       return NVMX_VMENTRY_SUCCESS;
 
        /*
         * A failed consistency check that leads to a VMExit during L1's
@@ -3165,14 +3168,14 @@ vmentry_fail_vmexit:
        vmx_switch_vmcs(vcpu, &vmx->vmcs01);
 
        if (!from_vmentry)
-               return 1;
+               return NVMX_VMENTRY_VMEXIT;
 
        load_vmcs12_host_state(vcpu, vmcs12);
        vmcs12->vm_exit_reason = exit_reason | VMX_EXIT_REASONS_FAILED_VMENTRY;
        vmcs12->exit_qualification = exit_qual;
        if (enable_shadow_vmcs || vmx->nested.hv_evmcs)
                vmx->nested.need_vmcs12_to_shadow_sync = true;
-       return 1;
+       return NVMX_VMENTRY_VMEXIT;
 }
 
 /*
@@ -3182,9 +3185,9 @@ vmentry_fail_vmexit:
 static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
 {
        struct vmcs12 *vmcs12;
+       enum nvmx_vmentry_status status;
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        u32 interrupt_shadow = vmx_get_interrupt_shadow(vcpu);
-       int ret;
 
        if (!nested_vmx_check_permission(vcpu))
                return 1;
@@ -3244,13 +3247,9 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
         * the nested entry.
         */
        vmx->nested.nested_run_pending = 1;
-       ret = nested_vmx_enter_non_root_mode(vcpu, true);
-       vmx->nested.nested_run_pending = !ret;
-       if (ret > 0)
-               return 1;
-       else if (ret)
-               return nested_vmx_failValid(vcpu,
-                       VMXERR_ENTRY_INVALID_CONTROL_FIELD);
+       status = nested_vmx_enter_non_root_mode(vcpu, true);
+       if (unlikely(status != NVMX_VMENTRY_SUCCESS))
+               goto vmentry_failed;
 
        /* Hide L1D cache contents from the nested guest.  */
        vmx->vcpu.arch.l1tf_flush_l1d = true;
@@ -3281,6 +3280,15 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
                return kvm_vcpu_halt(vcpu);
        }
        return 1;
+
+vmentry_failed:
+       vmx->nested.nested_run_pending = 0;
+       if (status == NVMX_VMENTRY_KVM_INTERNAL_ERROR)
+               return 0;
+       if (status == NVMX_VMENTRY_VMEXIT)
+               return 1;
+       WARN_ON_ONCE(status != NVMX_VMENTRY_VMFAIL);
+       return nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
 }
 
 /*
index 187d39bf0bf10ca178cc3f1a7b45e18b7000ccbe..6280f33e5fa604c745ac19f3e62fda681709c90d 100644 (file)
@@ -6,6 +6,16 @@
 #include "vmcs12.h"
 #include "vmx.h"
 
+/*
+ * Status returned by nested_vmx_enter_non_root_mode():
+ */
+enum nvmx_vmentry_status {
+       NVMX_VMENTRY_SUCCESS,           /* Entered VMX non-root mode */
+       NVMX_VMENTRY_VMFAIL,            /* Consistency check VMFail */
+       NVMX_VMENTRY_VMEXIT,            /* Consistency check VMExit */
+       NVMX_VMENTRY_KVM_INTERNAL_ERROR,/* KVM internal error */
+};
+
 void vmx_leave_nested(struct kvm_vcpu *vcpu);
 void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps,
                                bool apicv);
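With NVMX_VMENTRY_KVM_INTERNAL_ERROR, a vmlaunch/vmresume whose vmcs12 pages cannot be mapped is now surfaced to userspace instead of silently clearing the APIC-access control. A hedged userspace-side sketch of how a VMM's run loop might observe it (vcpu_fd and vcpu_run_area are assumptions, not from the patch):

struct kvm_run *run = vcpu_run_area;	/* mmap'ed from the vCPU fd */

if (ioctl(vcpu_fd, KVM_RUN, 0) == 0 &&
    run->exit_reason == KVM_EXIT_INTERNAL_ERROR) {
	/* suberror is KVM_INTERNAL_ERROR_EMULATION for this case */
	fprintf(stderr, "KVM internal error, suberror %u\n",
		run->internal.suberror);
}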
@@ -13,7 +23,8 @@ void nested_vmx_hardware_unsetup(void);
 __init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *));
 void nested_vmx_vcpu_setup(void);
 void nested_vmx_free_vcpu(struct kvm_vcpu *vcpu);
-int nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu, bool from_vmentry);
+enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
+                                                    bool from_vmentry);
 bool nested_vmx_exit_reflected(struct kvm_vcpu *vcpu, u32 exit_reason);
 void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
                       u32 exit_intr_info, unsigned long exit_qualification);
index 4dea0e0e7e392cb9de5771347604d99c126d24b4..3e9c059099e947af70b297804bf3fa4f6480e24e 100644 (file)
@@ -262,6 +262,7 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
 {
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
+       struct x86_pmu_capability x86_pmu;
        struct kvm_cpuid_entry2 *entry;
        union cpuid10_eax eax;
        union cpuid10_edx edx;
@@ -283,8 +284,10 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
        if (!pmu->version)
                return;
 
+       perf_get_x86_pmu_capability(&x86_pmu);
+
        pmu->nr_arch_gp_counters = min_t(int, eax.split.num_counters,
-                                       INTEL_PMC_MAX_GENERIC);
+                                        x86_pmu.num_counters_gp);
        pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << eax.split.bit_width) - 1;
        pmu->available_event_types = ~entry->ebx &
                                        ((1ull << eax.split.mask_length) - 1);
@@ -294,7 +297,7 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
        } else {
                pmu->nr_arch_fixed_counters =
                        min_t(int, edx.split.num_counters_fixed,
-                               INTEL_PMC_MAX_FIXED);
+                             x86_pmu.num_counters_fixed);
                pmu->counter_bitmask[KVM_PMC_FIXED] =
                        ((u64)1 << edx.split.bit_width_fixed) - 1;
        }
index d4575ffb3cec75bcb3f1fb5330c99516d6ab1265..5d21a4ab28cfc80b3f800adfd042cb23c155a737 100644 (file)
@@ -209,6 +209,11 @@ static int vmx_setup_l1d_flush(enum vmx_l1d_flush_state l1tf)
        struct page *page;
        unsigned int i;
 
+       if (!boot_cpu_has_bug(X86_BUG_L1TF)) {
+               l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_NOT_REQUIRED;
+               return 0;
+       }
+
        if (!enable_ept) {
                l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_EPT_DISABLED;
                return 0;
@@ -964,17 +969,9 @@ static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
        u64 guest_efer = vmx->vcpu.arch.efer;
        u64 ignore_bits = 0;
 
-       if (!enable_ept) {
-               /*
-                * NX is needed to handle CR0.WP=1, CR4.SMEP=1.  Testing
-                * host CPUID is more efficient than testing guest CPUID
-                * or CR4.  Host SMEP is anyway a requirement for guest SMEP.
-                */
-               if (boot_cpu_has(X86_FEATURE_SMEP))
-                       guest_efer |= EFER_NX;
-               else if (!(guest_efer & EFER_NX))
-                       ignore_bits |= EFER_NX;
-       }
+       /* Shadow paging assumes NX to be available.  */
+       if (!enable_ept)
+               guest_efer |= EFER_NX;
 
        /*
         * LMA and LME handled by hardware; SCE meaningless outside long mode.
@@ -5538,14 +5535,6 @@ static int handle_encls(struct kvm_vcpu *vcpu)
        return 1;
 }
 
-static int handle_unexpected_vmexit(struct kvm_vcpu *vcpu)
-{
-       kvm_skip_emulated_instruction(vcpu);
-       WARN_ONCE(1, "Unexpected VM-Exit Reason = 0x%x",
-               vmcs_read32(VM_EXIT_REASON));
-       return 1;
-}
-
 /*
  * The exit handlers return 1 if the exit was handled fully and guest execution
  * may resume.  Otherwise they set the kvm_run parameter to indicate what needs
@@ -5597,15 +5586,11 @@ static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
        [EXIT_REASON_INVVPID]                 = handle_vmx_instruction,
        [EXIT_REASON_RDRAND]                  = handle_invalid_op,
        [EXIT_REASON_RDSEED]                  = handle_invalid_op,
-       [EXIT_REASON_XSAVES]                  = handle_unexpected_vmexit,
-       [EXIT_REASON_XRSTORS]                 = handle_unexpected_vmexit,
        [EXIT_REASON_PML_FULL]                = handle_pml_full,
        [EXIT_REASON_INVPCID]                 = handle_invpcid,
        [EXIT_REASON_VMFUNC]                  = handle_vmx_instruction,
        [EXIT_REASON_PREEMPTION_TIMER]        = handle_preemption_timer,
        [EXIT_REASON_ENCLS]                   = handle_encls,
-       [EXIT_REASON_UMWAIT]                  = handle_unexpected_vmexit,
-       [EXIT_REASON_TPAUSE]                  = handle_unexpected_vmexit,
 };
 
 static const int kvm_vmx_max_exit_handlers =
@@ -7995,12 +7980,10 @@ static int __init vmx_init(void)
         * contain 'auto' which will be turned into the default 'cond'
         * mitigation mode.
         */
-       if (boot_cpu_has(X86_BUG_L1TF)) {
-               r = vmx_setup_l1d_flush(vmentry_l1d_flush_param);
-               if (r) {
-                       vmx_exit();
-                       return r;
-               }
+       r = vmx_setup_l1d_flush(vmentry_l1d_flush_param);
+       if (r) {
+               vmx_exit();
+               return r;
        }
 
 #ifdef CONFIG_KEXEC_CORE
index 0ed07d8d2caa024c020e36a397db3b2a1de612ec..ff395f8127190b10986ec116eac85b30f379ab8c 100644 (file)
@@ -92,8 +92,8 @@ u64 __read_mostly efer_reserved_bits = ~((u64)(EFER_SCE | EFER_LME | EFER_LMA));
 static u64 __read_mostly efer_reserved_bits = ~((u64)EFER_SCE);
 #endif
 
-#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
-#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
+#define VM_STAT(x, ...) offsetof(struct kvm, stat.x), KVM_STAT_VM, ## __VA_ARGS__
+#define VCPU_STAT(x, ...) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU, ## __VA_ARGS__
 
 #define KVM_X2APIC_API_VALID_FLAGS (KVM_X2APIC_API_USE_32BIT_IDS | \
                                     KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK)
@@ -212,7 +212,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "mmu_cache_miss", VM_STAT(mmu_cache_miss) },
        { "mmu_unsync", VM_STAT(mmu_unsync) },
        { "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
-       { "largepages", VM_STAT(lpages) },
+       { "largepages", VM_STAT(lpages, .mode = 0444) },
        { "max_mmu_page_hash_collisions",
                VM_STAT(max_mmu_page_hash_collisions) },
        { NULL }
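For orientation, the variadic rework lets individual entries override fields of struct kvm_stats_debugfs_item. As an assumed expansion (illustration only):

/* { "largepages", VM_STAT(lpages, .mode = 0444) } expands to: */
{ "largepages", offsetof(struct kvm, stat.lpages), KVM_STAT_VM, .mode = 0444 },
/* i.e. the largepages debugfs file becomes read-only, while entries that
 * pass no extra arguments keep the default mode. */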
@@ -360,8 +360,7 @@ EXPORT_SYMBOL_GPL(kvm_set_apic_base);
 asmlinkage __visible void kvm_spurious_fault(void)
 {
        /* Fault while not rebooting.  We want the trace. */
-       if (!kvm_rebooting)
-               BUG();
+       BUG_ON(!kvm_rebooting);
 }
 EXPORT_SYMBOL_GPL(kvm_spurious_fault);
 
@@ -885,34 +884,42 @@ int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
 }
 EXPORT_SYMBOL_GPL(kvm_set_xcr);
 
-int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
+static int kvm_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 {
-       unsigned long old_cr4 = kvm_read_cr4(vcpu);
-       unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE |
-                                  X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE;
-
        if (cr4 & CR4_RESERVED_BITS)
-               return 1;
+               return -EINVAL;
 
        if (!guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) && (cr4 & X86_CR4_OSXSAVE))
-               return 1;
+               return -EINVAL;
 
        if (!guest_cpuid_has(vcpu, X86_FEATURE_SMEP) && (cr4 & X86_CR4_SMEP))
-               return 1;
+               return -EINVAL;
 
        if (!guest_cpuid_has(vcpu, X86_FEATURE_SMAP) && (cr4 & X86_CR4_SMAP))
-               return 1;
+               return -EINVAL;
 
        if (!guest_cpuid_has(vcpu, X86_FEATURE_FSGSBASE) && (cr4 & X86_CR4_FSGSBASE))
-               return 1;
+               return -EINVAL;
 
        if (!guest_cpuid_has(vcpu, X86_FEATURE_PKU) && (cr4 & X86_CR4_PKE))
-               return 1;
+               return -EINVAL;
 
        if (!guest_cpuid_has(vcpu, X86_FEATURE_LA57) && (cr4 & X86_CR4_LA57))
-               return 1;
+               return -EINVAL;
 
        if (!guest_cpuid_has(vcpu, X86_FEATURE_UMIP) && (cr4 & X86_CR4_UMIP))
+               return -EINVAL;
+
+       return 0;
+}
+
+int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
+{
+       unsigned long old_cr4 = kvm_read_cr4(vcpu);
+       unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE |
+                                  X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE;
+
+       if (kvm_valid_cr4(vcpu, cr4))
                return 1;
 
        if (is_long_mode(vcpu)) {
@@ -1161,13 +1168,6 @@ static u32 msrs_to_save[] = {
        MSR_ARCH_PERFMON_PERFCTR0 + 12, MSR_ARCH_PERFMON_PERFCTR0 + 13,
        MSR_ARCH_PERFMON_PERFCTR0 + 14, MSR_ARCH_PERFMON_PERFCTR0 + 15,
        MSR_ARCH_PERFMON_PERFCTR0 + 16, MSR_ARCH_PERFMON_PERFCTR0 + 17,
-       MSR_ARCH_PERFMON_PERFCTR0 + 18, MSR_ARCH_PERFMON_PERFCTR0 + 19,
-       MSR_ARCH_PERFMON_PERFCTR0 + 20, MSR_ARCH_PERFMON_PERFCTR0 + 21,
-       MSR_ARCH_PERFMON_PERFCTR0 + 22, MSR_ARCH_PERFMON_PERFCTR0 + 23,
-       MSR_ARCH_PERFMON_PERFCTR0 + 24, MSR_ARCH_PERFMON_PERFCTR0 + 25,
-       MSR_ARCH_PERFMON_PERFCTR0 + 26, MSR_ARCH_PERFMON_PERFCTR0 + 27,
-       MSR_ARCH_PERFMON_PERFCTR0 + 28, MSR_ARCH_PERFMON_PERFCTR0 + 29,
-       MSR_ARCH_PERFMON_PERFCTR0 + 30, MSR_ARCH_PERFMON_PERFCTR0 + 31,
        MSR_ARCH_PERFMON_EVENTSEL0, MSR_ARCH_PERFMON_EVENTSEL1,
        MSR_ARCH_PERFMON_EVENTSEL0 + 2, MSR_ARCH_PERFMON_EVENTSEL0 + 3,
        MSR_ARCH_PERFMON_EVENTSEL0 + 4, MSR_ARCH_PERFMON_EVENTSEL0 + 5,
@@ -1177,13 +1177,6 @@ static u32 msrs_to_save[] = {
        MSR_ARCH_PERFMON_EVENTSEL0 + 12, MSR_ARCH_PERFMON_EVENTSEL0 + 13,
        MSR_ARCH_PERFMON_EVENTSEL0 + 14, MSR_ARCH_PERFMON_EVENTSEL0 + 15,
        MSR_ARCH_PERFMON_EVENTSEL0 + 16, MSR_ARCH_PERFMON_EVENTSEL0 + 17,
-       MSR_ARCH_PERFMON_EVENTSEL0 + 18, MSR_ARCH_PERFMON_EVENTSEL0 + 19,
-       MSR_ARCH_PERFMON_EVENTSEL0 + 20, MSR_ARCH_PERFMON_EVENTSEL0 + 21,
-       MSR_ARCH_PERFMON_EVENTSEL0 + 22, MSR_ARCH_PERFMON_EVENTSEL0 + 23,
-       MSR_ARCH_PERFMON_EVENTSEL0 + 24, MSR_ARCH_PERFMON_EVENTSEL0 + 25,
-       MSR_ARCH_PERFMON_EVENTSEL0 + 26, MSR_ARCH_PERFMON_EVENTSEL0 + 27,
-       MSR_ARCH_PERFMON_EVENTSEL0 + 28, MSR_ARCH_PERFMON_EVENTSEL0 + 29,
-       MSR_ARCH_PERFMON_EVENTSEL0 + 30, MSR_ARCH_PERFMON_EVENTSEL0 + 31,
 };
 
 static unsigned num_msrs_to_save;
@@ -2543,6 +2536,7 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
 static void kvmclock_reset(struct kvm_vcpu *vcpu)
 {
        vcpu->arch.pv_time_enabled = false;
+       vcpu->arch.time = 0;
 }
 
 static void kvm_vcpu_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa)
@@ -2708,8 +2702,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
        case MSR_KVM_SYSTEM_TIME: {
                struct kvm_arch *ka = &vcpu->kvm->arch;
 
-               kvmclock_reset(vcpu);
-
                if (vcpu->vcpu_id == 0 && !msr_info->host_initiated) {
                        bool tmp = (msr == MSR_KVM_SYSTEM_TIME);
 
@@ -2723,14 +2715,13 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                kvm_make_request(KVM_REQ_GLOBAL_CLOCK_UPDATE, vcpu);
 
                /* we verify if the enable bit is set... */
+               vcpu->arch.pv_time_enabled = false;
                if (!(data & 1))
                        break;
 
-               if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
+               if (!kvm_gfn_to_hva_cache_init(vcpu->kvm,
                     &vcpu->arch.pv_time, data & ~1ULL,
                     sizeof(struct pvclock_vcpu_time_info)))
-                       vcpu->arch.pv_time_enabled = false;
-               else
                        vcpu->arch.pv_time_enabled = true;
 
                break;
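A minimal sketch of the MSR layout the rewritten enable logic parses, based on the code above (the helper name is invented):

static void parse_kvm_system_time(u64 data, bool *enabled, u64 *time_gpa)
{
	*enabled  = data & 1;		/* bit 0: pvclock enabled */
	*time_gpa = data & ~1ULL;	/* remaining bits: guest address of
					   struct pvclock_vcpu_time_info */
}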
@@ -5097,13 +5088,14 @@ out:
 
 static void kvm_init_msr_list(void)
 {
+       struct x86_pmu_capability x86_pmu;
        u32 dummy[2];
        unsigned i, j;
 
        BUILD_BUG_ON_MSG(INTEL_PMC_MAX_FIXED != 4,
                         "Please update the fixed PMCs in msrs_to_save[]");
-       BUILD_BUG_ON_MSG(INTEL_PMC_MAX_GENERIC != 32,
-                        "Please update the generic perfctr/eventsel MSRs in msrs_to_save[]");
+
+       perf_get_x86_pmu_capability(&x86_pmu);
 
        for (i = j = 0; i < ARRAY_SIZE(msrs_to_save); i++) {
                if (rdmsr_safe(msrs_to_save[i], &dummy[0], &dummy[1]) < 0)
@@ -5145,6 +5137,15 @@ static void kvm_init_msr_list(void)
                                intel_pt_validate_hw_cap(PT_CAP_num_address_ranges) * 2)
                                continue;
                        break;
+               case MSR_ARCH_PERFMON_PERFCTR0 ... MSR_ARCH_PERFMON_PERFCTR0 + 17:
+                       if (msrs_to_save[i] - MSR_ARCH_PERFMON_PERFCTR0 >=
+                           min(INTEL_PMC_MAX_GENERIC, x86_pmu.num_counters_gp))
+                               continue;
+                       break;
+               case MSR_ARCH_PERFMON_EVENTSEL0 ... MSR_ARCH_PERFMON_EVENTSEL0 + 17:
+                       if (msrs_to_save[i] - MSR_ARCH_PERFMON_EVENTSEL0 >=
+                           min(INTEL_PMC_MAX_GENERIC, x86_pmu.num_counters_gp))
+                               continue;
                }
                default:
                        break;
@@ -7937,8 +7938,12 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
        bool req_immediate_exit = false;
 
        if (kvm_request_pending(vcpu)) {
-               if (kvm_check_request(KVM_REQ_GET_VMCS12_PAGES, vcpu))
-                       kvm_x86_ops->get_vmcs12_pages(vcpu);
+               if (kvm_check_request(KVM_REQ_GET_VMCS12_PAGES, vcpu)) {
+                       if (unlikely(!kvm_x86_ops->get_vmcs12_pages(vcpu))) {
+                               r = 0;
+                               goto out;
+                       }
+               }
                if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu))
                        kvm_mmu_unload(vcpu);
                if (kvm_check_request(KVM_REQ_MIGRATE_TIMER, vcpu))
@@ -8714,10 +8719,6 @@ EXPORT_SYMBOL_GPL(kvm_task_switch);
 
 static int kvm_valid_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
 {
-       if (!guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) &&
-                       (sregs->cr4 & X86_CR4_OSXSAVE))
-               return  -EINVAL;
-
        if ((sregs->efer & EFER_LME) && (sregs->cr0 & X86_CR0_PG)) {
                /*
                 * When EFER.LME and CR0.PG are set, the processor is in
@@ -8736,7 +8737,7 @@ static int kvm_valid_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
                        return -EINVAL;
        }
 
-       return 0;
+       return kvm_valid_cr4(vcpu, sregs->cr4);
 }
 
 static int __set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
index b7375dc6898f3215966359a2de29a6b8fc9ce592..c126571e5e2ee8ea264627d8da540e7032bf5eb5 100644 (file)
@@ -113,8 +113,8 @@ static void delay_mwaitx(unsigned long __loops)
                __monitorx(raw_cpu_ptr(&cpu_tss_rw), 0, 0);
 
                /*
-                * AMD, like Intel, supports the EAX hint and EAX=0xf
-                * means, do not enter any deep C-state and we use it
+                * AMD, like Intel's MWAIT version, supports the EAX hint and
+                * EAX=0xf0 means, do not enter any deep C-state and we use it
                 * here in delay() to minimize wakeup latency.
                 */
                __mwaitx(MWAITX_DISABLE_CSTATES, delay, MWAITX_ECX_TIMER_ENABLE);
index c202e1b07e290c9f7bceeeb8a0c0deb887e990f6..425e025341db915fb712409826466c4095e3da53 100644 (file)
@@ -917,9 +917,6 @@ static void __init kexec_enter_virtual_mode(void)
 
        if (efi_enabled(EFI_OLD_MEMMAP) && (__supported_pte_mask & _PAGE_NX))
                runtime_code_page_mkexec();
-
-       /* clean DUMMY object */
-       efi_delete_dummy_variable();
 #endif
 }
 
index 0d3365cb64de034ad2cd6e6cc299d8b07977a4de..a04551ee5568b958be51634346054e3b5b7c5c93 100644 (file)
@@ -57,19 +57,7 @@ static efi_system_table_t __init *xen_efi_probe(void)
                return NULL;
 
        /* Here we know that Xen runs on EFI platform. */
-
-       efi.get_time                 = xen_efi_get_time;
-       efi.set_time                 = xen_efi_set_time;
-       efi.get_wakeup_time          = xen_efi_get_wakeup_time;
-       efi.set_wakeup_time          = xen_efi_set_wakeup_time;
-       efi.get_variable             = xen_efi_get_variable;
-       efi.get_next_variable        = xen_efi_get_next_variable;
-       efi.set_variable             = xen_efi_set_variable;
-       efi.query_variable_info      = xen_efi_query_variable_info;
-       efi.update_capsule           = xen_efi_update_capsule;
-       efi.query_capsule_caps       = xen_efi_query_capsule_caps;
-       efi.get_next_high_mono_count = xen_efi_get_next_high_mono_count;
-       efi.reset_system             = xen_efi_reset_system;
+       xen_efi_runtime_setup();
 
        efi_systab_xen.tables = info->cfg.addr;
        efi_systab_xen.nr_tables = info->cfg.nent;
index 750f46ad018a0e0e500bf5dd60da2f7901a54f89..205b1176084f572e9438882bf2ee9ff13404d2cd 100644 (file)
@@ -269,19 +269,41 @@ void xen_reboot(int reason)
                BUG();
 }
 
+static int reboot_reason = SHUTDOWN_reboot;
+static bool xen_legacy_crash;
 void xen_emergency_restart(void)
 {
-       xen_reboot(SHUTDOWN_reboot);
+       xen_reboot(reboot_reason);
 }
 
 static int
 xen_panic_event(struct notifier_block *this, unsigned long event, void *ptr)
 {
-       if (!kexec_crash_loaded())
-               xen_reboot(SHUTDOWN_crash);
+       if (!kexec_crash_loaded()) {
+               if (xen_legacy_crash)
+                       xen_reboot(SHUTDOWN_crash);
+
+               reboot_reason = SHUTDOWN_crash;
+
+               /*
+                * If panic_timeout==0 then we are supposed to wait forever.
+                * However, to preserve original dom0 behavior we have to drop
+                * into hypervisor. (domU behavior is controlled by its
+                * config file)
+                */
+               if (panic_timeout == 0)
+                       panic_timeout = -1;
+       }
        return NOTIFY_DONE;
 }
 
+static int __init parse_xen_legacy_crash(char *arg)
+{
+       xen_legacy_crash = true;
+       return 0;
+}
+early_param("xen_legacy_crash", parse_xen_legacy_crash);
+
 static struct notifier_block xen_panic_block = {
        .notifier_call = xen_panic_event,
        .priority = INT_MIN
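The new early_param gives administrators an opt-out: booting the guest with xen_legacy_crash on the kernel command line restores the old behaviour of issuing SHUTDOWN_crash immediately from the panic notifier, e.g. (illustrative command line):

    root=/dev/xvda1 ... xen_legacy_crash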
index 58f79ab32358429ea4f948ccccbdfe86e9e294e3..5bfea374a1609a590d9cae7b343b07f85e02bf6d 100644 (file)
@@ -117,6 +117,14 @@ static void __init xen_banner(void)
        printk(KERN_INFO "Xen version: %d.%d%s%s\n",
               version >> 16, version & 0xffff, extra.extraversion,
               xen_feature(XENFEAT_mmu_pt_update_preserve_ad) ? " (preserve-AD)" : "");
+
+#ifdef CONFIG_X86_32
+       pr_warn("WARNING! WARNING! WARNING! WARNING! WARNING! WARNING! WARNING!\n"
+               "Support for running as 32-bit PV-guest under Xen will soon be removed\n"
+               "from the Linux kernel!\n"
+               "Please use either a 64-bit kernel or switch to HVM or PVH mode!\n"
+               "WARNING! WARNING! WARNING! WARNING! WARNING! WARNING! WARNING!\n");
+#endif
 }
 
 static void __init xen_pv_init_platform(void)
index a9dcd87b6eb1ee894fc540d173dcd77030ed54da..611b98a02a65e858bae4e5f9815a36eeab3c285c 100644 (file)
@@ -56,7 +56,7 @@
                reg = <0xf0100000 0x03f00000>;
 
                     // BUS_ADDRESS(3)  CPU_PHYSICAL(1)  SIZE(2)
-               ranges = <0x01000000 0x0 0xf0000000  0xf0000000  0x0 0x00010000>,
+               ranges = <0x01000000 0x0 0x00000000  0xf0000000  0x0 0x00010000>,
                         <0x02000000 0x0 0xf4000000  0xf4000000  0x0 0x08000000>;
 
                     // PCI_DEVICE(3)  INT#(1)  CONTROLLER(PHANDLE)  CONTROLLER_DATA(2)
index aeb15f4c755b379de5d0c0f62eba6eaab7d7229e..be8b2be5a98bd9b8972b88630d2ca978fe3130cc 100644 (file)
@@ -148,7 +148,7 @@ static inline void change_bit(unsigned int bit, volatile unsigned long *p)
                        "       getex   %0\n"
                        "       beqz    %0, 1b\n"
                        : "=&a" (tmp)
-                       : "a" (~mask), "a" (p)
+                       : "a" (mask), "a" (p)
                        : "memory");
 }
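The fix passes the mask rather than its complement to the exclusive-access sequence. A plain C equivalent of what change_bit() must compute (illustration only, assuming 32-bit longs as on xtensa):

static inline void change_bit_c(unsigned int bit, volatile unsigned long *p)
{
	unsigned long mask = 1UL << (bit & 31);

	p += bit >> 5;
	*p ^= mask;	/* toggle: XOR with mask, not with ~mask */
}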
 
index 6792928ba84a7bb16d20a661f5070d0676e2f6b6..3f80386f18838902b1a96a32c3839cf9979ec436 100644 (file)
@@ -100,7 +100,7 @@ do {                                                                        \
        case 4: __put_user_asm(x, ptr, retval, 4, "s32i", __cb); break; \
        case 8: {                                                       \
                     __typeof__(*ptr) __v64 = x;                        \
-                    retval = __copy_to_user(ptr, &__v64, 8);           \
+                    retval = __copy_to_user(ptr, &__v64, 8) ? -EFAULT : 0;     \
                     break;                                             \
                }                                                       \
        default: __put_user_bad();                                      \
@@ -132,14 +132,14 @@ do {                                                                      \
 #define __check_align_1  ""
 
 #define __check_align_2                                \
-       "   _bbci.l %3,  0, 1f          \n"     \
-       "   movi    %0, %4              \n"     \
+       "   _bbci.l %[addr], 0, 1f      \n"     \
+       "   movi    %[err], %[efault]   \n"     \
        "   _j      2f                  \n"
 
 #define __check_align_4                                \
-       "   _bbsi.l %3,  0, 0f          \n"     \
-       "   _bbci.l %3,  1, 1f          \n"     \
-       "0: movi    %0, %4              \n"     \
+       "   _bbsi.l %[addr], 0, 0f      \n"     \
+       "   _bbci.l %[addr], 1, 1f      \n"     \
+       "0: movi    %[err], %[efault]   \n"     \
        "   _j      2f                  \n"
 
 
@@ -151,40 +151,40 @@ do {                                                                      \
  * WARNING: If you modify this macro at all, verify that the
  * __check_align_* macros still work.
  */
-#define __put_user_asm(x, addr, err, align, insn, cb)  \
+#define __put_user_asm(x_, addr_, err_, align, insn, cb)\
 __asm__ __volatile__(                                  \
        __check_align_##align                           \
-       "1: "insn"  %2, %3, 0           \n"             \
+       "1: "insn"  %[x], %[addr], 0    \n"             \
        "2:                             \n"             \
        "   .section  .fixup,\"ax\"     \n"             \
        "   .align 4                    \n"             \
        "   .literal_position           \n"             \
        "5:                             \n"             \
-       "   movi   %1, 2b               \n"             \
-       "   movi   %0, %4               \n"             \
-       "   jx     %1                   \n"             \
+       "   movi   %[tmp], 2b           \n"             \
+       "   movi   %[err], %[efault]    \n"             \
+       "   jx     %[tmp]               \n"             \
        "   .previous                   \n"             \
        "   .section  __ex_table,\"a\"  \n"             \
        "   .long       1b, 5b          \n"             \
        "   .previous"                                  \
-       :"=r" (err), "=r" (cb)                          \
-       :"r" ((int)(x)), "r" (addr), "i" (-EFAULT), "0" (err))
+       :[err] "+r"(err_), [tmp] "=r"(cb)               \
+       :[x] "r"(x_), [addr] "r"(addr_), [efault] "i"(-EFAULT))
 
 #define __get_user_nocheck(x, ptr, size)                       \
 ({                                                             \
-       long __gu_err, __gu_val;                                \
-       __get_user_size(__gu_val, (ptr), (size), __gu_err);     \
-       (x) = (__force __typeof__(*(ptr)))__gu_val;             \
+       long __gu_err;                                          \
+       __get_user_size((x), (ptr), (size), __gu_err);          \
        __gu_err;                                               \
 })
 
 #define __get_user_check(x, ptr, size)                                 \
 ({                                                                     \
-       long __gu_err = -EFAULT, __gu_val = 0;                          \
+       long __gu_err = -EFAULT;                                        \
        const __typeof__(*(ptr)) *__gu_addr = (ptr);                    \
-       if (access_ok(__gu_addr, size))                 \
-               __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
-       (x) = (__force __typeof__(*(ptr)))__gu_val;                     \
+       if (access_ok(__gu_addr, size))                                 \
+               __get_user_size((x), __gu_addr, (size), __gu_err);      \
+       else                                                            \
+               (x) = 0;                                                \
        __gu_err;                                                       \
 })
 
@@ -198,8 +198,17 @@ do {                                                                       \
        case 1: __get_user_asm(x, ptr, retval, 1, "l8ui", __cb);  break;\
        case 2: __get_user_asm(x, ptr, retval, 2, "l16ui", __cb); break;\
        case 4: __get_user_asm(x, ptr, retval, 4, "l32i", __cb);  break;\
-       case 8: retval = __copy_from_user(&x, ptr, 8);    break;        \
-       default: (x) = __get_user_bad();                                \
+       case 8: {                                                       \
+               u64 __x;                                                \
+               if (unlikely(__copy_from_user(&__x, ptr, 8))) {         \
+                       retval = -EFAULT;                               \
+                       (x) = 0;                                        \
+               } else {                                                \
+                       (x) = *(__force __typeof__((ptr)))&__x;         \
+               }                                                       \
+               break;                                                  \
+       }                                                               \
+       default: (x) = 0; __get_user_bad();                             \
        }                                                               \
 } while (0)
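Callers depend on the zeroing behaviour made explicit above: on a faulting access the destination must not be left holding stale kernel data. A hedged usage sketch (uptr and the helper are illustrative):

static int read_u32_from_user(const u32 __user *uptr, u32 *out)
{
	u32 val;

	if (get_user(val, uptr))	/* -EFAULT on fault, and val == 0 */
		return -EFAULT;
	*out = val;
	return 0;
}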
 
@@ -208,25 +217,28 @@ do {                                                                      \
  * WARNING: If you modify this macro at all, verify that the
  * __check_align_* macros still work.
  */
-#define __get_user_asm(x, addr, err, align, insn, cb) \
-__asm__ __volatile__(                  \
-       __check_align_##align                   \
-       "1: "insn"  %2, %3, 0           \n"     \
-       "2:                             \n"     \
-       "   .section  .fixup,\"ax\"     \n"     \
-       "   .align 4                    \n"     \
-       "   .literal_position           \n"     \
-       "5:                             \n"     \
-       "   movi   %1, 2b               \n"     \
-       "   movi   %2, 0                \n"     \
-       "   movi   %0, %4               \n"     \
-       "   jx     %1                   \n"     \
-       "   .previous                   \n"     \
-       "   .section  __ex_table,\"a\"  \n"     \
-       "   .long       1b, 5b          \n"     \
-       "   .previous"                          \
-       :"=r" (err), "=r" (cb), "=r" (x)        \
-       :"r" (addr), "i" (-EFAULT), "0" (err))
+#define __get_user_asm(x_, addr_, err_, align, insn, cb) \
+do {                                                   \
+       u32 __x = 0;                                    \
+       __asm__ __volatile__(                           \
+               __check_align_##align                   \
+               "1: "insn"  %[x], %[addr], 0    \n"     \
+               "2:                             \n"     \
+               "   .section  .fixup,\"ax\"     \n"     \
+               "   .align 4                    \n"     \
+               "   .literal_position           \n"     \
+               "5:                             \n"     \
+               "   movi   %[tmp], 2b           \n"     \
+               "   movi   %[err], %[efault]    \n"     \
+               "   jx     %[tmp]               \n"     \
+               "   .previous                   \n"     \
+               "   .section  __ex_table,\"a\"  \n"     \
+               "   .long       1b, 5b          \n"     \
+               "   .previous"                          \
+               :[err] "+r"(err_), [tmp] "=r"(cb), [x] "+r"(__x) \
+               :[addr] "r"(addr_), [efault] "i"(-EFAULT)); \
+       (x_) = (__force __typeof__(*(addr_)))__x;       \
+} while (0)
 
 
 /*
index 04f19de4670033142537a6271b749c30836ea47e..4092555828b13a25305f250863ad2e4d42016727 100644 (file)
@@ -119,13 +119,6 @@ EXPORT_SYMBOL(__invalidate_icache_range);
 // FIXME EXPORT_SYMBOL(screen_info);
 #endif
 
-EXPORT_SYMBOL(outsb);
-EXPORT_SYMBOL(outsw);
-EXPORT_SYMBOL(outsl);
-EXPORT_SYMBOL(insb);
-EXPORT_SYMBOL(insw);
-EXPORT_SYMBOL(insl);
-
 extern long common_exception_return;
 EXPORT_SYMBOL(common_exception_return);
 
index b6f20be0fc78c16a2984294386a25b2893f677fc..5d21027b1faf03b1a16bcd38b8670657c9f02380 100644 (file)
@@ -1362,7 +1362,7 @@ int blkcg_activate_policy(struct request_queue *q,
                          const struct blkcg_policy *pol)
 {
        struct blkg_policy_data *pd_prealloc = NULL;
-       struct blkcg_gq *blkg;
+       struct blkcg_gq *blkg, *pinned_blkg = NULL;
        int ret;
 
        if (blkcg_policy_enabled(q, pol))
@@ -1370,49 +1370,82 @@ int blkcg_activate_policy(struct request_queue *q,
 
        if (queue_is_mq(q))
                blk_mq_freeze_queue(q);
-pd_prealloc:
-       if (!pd_prealloc) {
-               pd_prealloc = pol->pd_alloc_fn(GFP_KERNEL, q, &blkcg_root);
-               if (!pd_prealloc) {
-                       ret = -ENOMEM;
-                       goto out_bypass_end;
-               }
-       }
-
+retry:
        spin_lock_irq(&q->queue_lock);
 
-       /* blkg_list is pushed at the head, reverse walk to init parents first */
+       /* blkg_list is pushed at the head, reverse walk to allocate parents first */
        list_for_each_entry_reverse(blkg, &q->blkg_list, q_node) {
                struct blkg_policy_data *pd;
 
                if (blkg->pd[pol->plid])
                        continue;
 
-               pd = pol->pd_alloc_fn(GFP_NOWAIT | __GFP_NOWARN, q, &blkcg_root);
-               if (!pd)
-                       swap(pd, pd_prealloc);
+               /* If prealloc matches, use it; otherwise try GFP_NOWAIT */
+               if (blkg == pinned_blkg) {
+                       pd = pd_prealloc;
+                       pd_prealloc = NULL;
+               } else {
+                       pd = pol->pd_alloc_fn(GFP_NOWAIT | __GFP_NOWARN, q,
+                                             blkg->blkcg);
+               }
+
                if (!pd) {
+                       /*
+                        * GFP_NOWAIT failed.  Free the existing one and
+                        * prealloc for @blkg w/ GFP_KERNEL.
+                        */
+                       if (pinned_blkg)
+                               blkg_put(pinned_blkg);
+                       blkg_get(blkg);
+                       pinned_blkg = blkg;
+
                        spin_unlock_irq(&q->queue_lock);
-                       goto pd_prealloc;
+
+                       if (pd_prealloc)
+                               pol->pd_free_fn(pd_prealloc);
+                       pd_prealloc = pol->pd_alloc_fn(GFP_KERNEL, q,
+                                                      blkg->blkcg);
+                       if (pd_prealloc)
+                               goto retry;
+                       else
+                               goto enomem;
                }
 
                blkg->pd[pol->plid] = pd;
                pd->blkg = blkg;
                pd->plid = pol->plid;
-               if (pol->pd_init_fn)
-                       pol->pd_init_fn(pd);
        }
 
+       /* all allocated, init in the same order */
+       if (pol->pd_init_fn)
+               list_for_each_entry_reverse(blkg, &q->blkg_list, q_node)
+                       pol->pd_init_fn(blkg->pd[pol->plid]);
+
        __set_bit(pol->plid, q->blkcg_pols);
        ret = 0;
 
        spin_unlock_irq(&q->queue_lock);
-out_bypass_end:
+out:
        if (queue_is_mq(q))
                blk_mq_unfreeze_queue(q);
+       if (pinned_blkg)
+               blkg_put(pinned_blkg);
        if (pd_prealloc)
                pol->pd_free_fn(pd_prealloc);
        return ret;
+
+enomem:
+       /* alloc failed, nothing's initialized yet, free everything */
+       spin_lock_irq(&q->queue_lock);
+       list_for_each_entry(blkg, &q->blkg_list, q_node) {
+               if (blkg->pd[pol->plid]) {
+                       pol->pd_free_fn(blkg->pd[pol->plid]);
+                       blkg->pd[pol->plid] = NULL;
+               }
+       }
+       spin_unlock_irq(&q->queue_lock);
+       ret = -ENOMEM;
+       goto out;
 }
 EXPORT_SYMBOL_GPL(blkcg_activate_policy);
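The control flow above is easier to follow as a sketch; this restates the hunk, it does not change it:

/*
 * retry:
 *   lock queue
 *   for each blkg, parents first:
 *     pd = (blkg == pinned_blkg) ? pd_prealloc : pd_alloc_fn(GFP_NOWAIT)
 *     if (!pd): pin this blkg, unlock, pd_prealloc = pd_alloc_fn(GFP_KERNEL)
 *               then goto retry on success, goto enomem on failure
 *   init every pd in the same reverse order, mark policy enabled, unlock
 * enomem: free any pds installed so far and return -ENOMEM
 */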
 
index 2a3db80c1dce7f97c65229510d078e3553615fc4..a7ed434eae0387b92d41009b0cc52f3522ed556c 100644 (file)
@@ -2110,10 +2110,10 @@ static ssize_t ioc_weight_write(struct kernfs_open_file *of, char *buf,
                        goto einval;
        }
 
-       spin_lock_irq(&iocg->ioc->lock);
+       spin_lock(&iocg->ioc->lock);
        iocg->cfg_weight = v;
        weight_updated(iocg);
-       spin_unlock_irq(&iocg->ioc->lock);
+       spin_unlock(&iocg->ioc->lock);
 
        blkg_conf_finish(&ctx);
        return nbytes;
index 6e3b15f70cd7ae651bfd149115364135312aeaf2..ec791156e9ccd1b17cc9c07b98f55b08f2a705de 100644 (file)
@@ -1992,10 +1992,14 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
                /* bypass scheduler for flush rq */
                blk_insert_flush(rq);
                blk_mq_run_hw_queue(data.hctx, true);
-       } else if (plug && (q->nr_hw_queues == 1 || q->mq_ops->commit_rqs)) {
+       } else if (plug && (q->nr_hw_queues == 1 || q->mq_ops->commit_rqs ||
+                               !blk_queue_nonrot(q))) {
                /*
                 * Use plugging if we have a ->commit_rqs() hook as well, as
                 * we know the driver uses bd->last in a smart fashion.
+                *
+                * Use normal plugging if this disk is slow HDD, as sequential
+                * IO may benefit a lot from plug merging.
                 */
                unsigned int request_count = plug->rq_count;
                struct request *last = NULL;
@@ -2012,6 +2016,8 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
                }
 
                blk_add_rq_to_plug(plug, rq);
+       } else if (q->elevator) {
+               blk_mq_sched_insert_request(rq, false, true, true);
        } else if (plug && !blk_queue_nomerges(q)) {
                /*
                 * We do limited plugging. If the bio can be merged, do that.
@@ -2035,8 +2041,8 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
                        blk_mq_try_issue_directly(data.hctx, same_queue_rq,
                                        &cookie);
                }
-       } else if ((q->nr_hw_queues > 1 && is_sync) || (!q->elevator &&
-                       !data.hctx->dispatch_busy)) {
+       } else if ((q->nr_hw_queues > 1 && is_sync) ||
+                       !data.hctx->dispatch_busy) {
                blk_mq_try_issue_directly(data.hctx, rq, &cookie);
        } else {
                blk_mq_sched_insert_request(rq, false, true, true);
index 61b635bc2a3181be4432513d2c4a6a177d36e3ca..656460636ad34a176297bfb991aba977b3c0e9ca 100644 (file)
@@ -160,24 +160,27 @@ bool rq_depth_calc_max_depth(struct rq_depth *rqd)
        return ret;
 }
 
-void rq_depth_scale_up(struct rq_depth *rqd)
+/* Returns true on success and false if scaling up wasn't possible */
+bool rq_depth_scale_up(struct rq_depth *rqd)
 {
        /*
         * Hit max in previous round, stop here
         */
        if (rqd->scaled_max)
-               return;
+               return false;
 
        rqd->scale_step--;
 
        rqd->scaled_max = rq_depth_calc_max_depth(rqd);
+       return true;
 }
 
 /*
  * Scale rwb down. If 'hard_throttle' is set, do it quicker, since we
- * had a latency violation.
+ * had a latency violation. Returns true on success and returns false if
+ * scaling down wasn't possible.
  */
-void rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle)
+bool rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle)
 {
        /*
         * Stop scaling down when we've hit the limit. This also prevents
@@ -185,7 +188,7 @@ void rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle)
         * keep up.
         */
        if (rqd->max_depth == 1)
-               return;
+               return false;
 
        if (rqd->scale_step < 0 && hard_throttle)
                rqd->scale_step = 0;
@@ -194,6 +197,7 @@ void rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle)
 
        rqd->scaled_max = false;
        rq_depth_calc_max_depth(rqd);
+       return true;
 }
 
 struct rq_qos_wait_data {
index 08a09dbe0f4bfb39eb60f543f2e00e15fb72ab86..2bc43e94f4c407582087e328d892f0c5d1e4da86 100644 (file)
@@ -108,16 +108,13 @@ static inline void rq_qos_add(struct request_queue *q, struct rq_qos *rqos)
 
 static inline void rq_qos_del(struct request_queue *q, struct rq_qos *rqos)
 {
-       struct rq_qos *cur, *prev = NULL;
-       for (cur = q->rq_qos; cur; cur = cur->next) {
-               if (cur == rqos) {
-                       if (prev)
-                               prev->next = rqos->next;
-                       else
-                               q->rq_qos = cur;
+       struct rq_qos **cur;
+
+       for (cur = &q->rq_qos; *cur; cur = &(*cur)->next) {
+               if (*cur == rqos) {
+                       *cur = rqos->next;
                        break;
                }
-               prev = cur;
        }
 
        blk_mq_debugfs_unregister_rqos(rqos);
@@ -130,8 +127,8 @@ void rq_qos_wait(struct rq_wait *rqw, void *private_data,
                 acquire_inflight_cb_t *acquire_inflight_cb,
                 cleanup_cb_t *cleanup_cb);
 bool rq_wait_inc_below(struct rq_wait *rq_wait, unsigned int limit);
-void rq_depth_scale_up(struct rq_depth *rqd);
-void rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle);
+bool rq_depth_scale_up(struct rq_depth *rqd);
+bool rq_depth_scale_down(struct rq_depth *rqd, bool hard_throttle);
 bool rq_depth_calc_max_depth(struct rq_depth *rqd);
 
 void __rq_qos_cleanup(struct rq_qos *rqos, struct bio *bio);
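
The rq_qos_del() rewrite above uses the pointer-to-pointer idiom so that unlinking the first element rewrites q->rq_qos itself instead of leaving a stale head, which was the bug in the removed code. A self-contained user-space illustration of the same idiom, with a hypothetical node type rather than the kernel structures:

    #include <stdio.h>
    #include <stdlib.h>

    struct node { int id; struct node *next; };

    /* Unlink the node with the given id; the head needs no special case. */
    static void list_del_by_id(struct node **head, int id)
    {
        struct node **cur;

        for (cur = head; *cur; cur = &(*cur)->next) {
            if ((*cur)->id == id) {
                struct node *victim = *cur;

                *cur = victim->next;   /* rewrites the head when victim is first */
                free(victim);
                break;
            }
        }
    }

    int main(void)
    {
        struct node *c = malloc(sizeof(*c)), *b = malloc(sizeof(*b)), *a = malloc(sizeof(*a));
        struct node *head;

        c->id = 3; c->next = NULL;
        b->id = 2; b->next = c;
        a->id = 1; a->next = b;
        head = a;

        list_del_by_id(&head, 1);              /* removing the head works too */
        printf("new head id: %d\n", head->id); /* prints 2 */

        while (head) {
            struct node *n = head->next;
            free(head);
            head = n;
        }
        return 0;
    }
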
index 8af553a0ba00daaf09994be44c7c71aca9a3bfdc..8641ba9793c5fb0831eebb5743935771b38da5ac 100644 (file)
@@ -308,7 +308,8 @@ static void calc_wb_limits(struct rq_wb *rwb)
 
 static void scale_up(struct rq_wb *rwb)
 {
-       rq_depth_scale_up(&rwb->rq_depth);
+       if (!rq_depth_scale_up(&rwb->rq_depth))
+               return;
        calc_wb_limits(rwb);
        rwb->unknown_cnt = 0;
        rwb_wake_all(rwb);
@@ -317,7 +318,8 @@ static void scale_up(struct rq_wb *rwb)
 
 static void scale_down(struct rq_wb *rwb, bool hard_throttle)
 {
-       rq_depth_scale_down(&rwb->rq_depth, hard_throttle);
+       if (!rq_depth_scale_down(&rwb->rq_depth, hard_throttle))
+               return;
        calc_wb_limits(rwb);
        rwb->unknown_cnt = 0;
        rwb_trace_step(rwb, "scale down");
index 5437059c92615ea9c474342d31ad4348e788bf19..076ba7308e65c34a8e4abd1df484d7388b8a4494 100644 (file)
@@ -616,7 +616,8 @@ out:
 
 static inline bool elv_support_iosched(struct request_queue *q)
 {
-       if (q->tag_set && (q->tag_set->flags & BLK_MQ_F_NO_SCHED))
+       if (!q->mq_ops ||
+           (q->tag_set && (q->tag_set->flags & BLK_MQ_F_NO_SCHED)))
                return false;
        return true;
 }
index 4e95a9792162582d5a4c7bd4fc48f95a5135fcec..b4c761973ac102fda9f75a3f92c56e77f5a718cd 100644 (file)
@@ -129,7 +129,7 @@ static const u8 opaluid[][OPAL_UID_LENGTH] = {
                { 0x00, 0x00, 0x00, 0x09, 0x00, 0x00, 0x84, 0x01 },
 
        /* tables */
-       [OPAL_TABLE_TABLE]
+       [OPAL_TABLE_TABLE] =
                { 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01 },
        [OPAL_LOCKINGRANGE_GLOBAL] =
                { 0x00, 0x00, 0x08, 0x02, 0x00, 0x00, 0x00, 0x01 },
@@ -372,8 +372,8 @@ static void check_geometry(struct opal_dev *dev, const void *data)
 {
        const struct d0_geometry_features *geo = data;
 
-       dev->align = geo->alignment_granularity;
-       dev->lowest_lba = geo->lowest_aligned_lba;
+       dev->align = be64_to_cpu(geo->alignment_granularity);
+       dev->lowest_lba = be64_to_cpu(geo->lowest_aligned_lba);
 }
 
 static int execute_step(struct opal_dev *dev,
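
The check_geometry() fix above converts the Level 0 discovery geometry fields from their on-wire big-endian layout before use; storing them raw only happens to work on big-endian hosts. A small user-space analogue, assuming a buffer that holds a big-endian 64-bit field (be64toh() is the glibc counterpart of the kernel's be64_to_cpu()):

    #include <endian.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        /* Big-endian encoding of 0x1122334455667788, as it would arrive off the wire. */
        const unsigned char wire[8] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88 };
        uint64_t raw, host;

        memcpy(&raw, wire, sizeof(raw));
        host = be64toh(raw);          /* correct on little- and big-endian hosts alike */

        printf("raw=0x%016llx host=0x%016llx\n",
               (unsigned long long)raw, (unsigned long long)host);
        return 0;
    }
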
index 3b2525908dd8cfaed9074d7926d2036102e77f34..a1a858ad4d18d57117eaf3e2a65ca26985754f0a 100644 (file)
@@ -905,8 +905,8 @@ void acpi_cppc_processor_exit(struct acpi_processor *pr)
                        pcc_data[pcc_ss_id]->refcount--;
                        if (!pcc_data[pcc_ss_id]->refcount) {
                                pcc_mbox_free_channel(pcc_data[pcc_ss_id]->pcc_channel);
-                               pcc_data[pcc_ss_id]->pcc_channel_acquired = 0;
                                kfree(pcc_data[pcc_ss_id]);
+                               pcc_data[pcc_ss_id] = NULL;
                        }
                }
        }
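
The cppc_acpi change above frees the per-subspace pcc_data entry once its refcount drops to zero and, crucially, clears the slot so later lookups see NULL rather than a dangling pointer. The same pattern in miniature, using a hypothetical table of contexts:

    #include <stdio.h>
    #include <stdlib.h>

    #define MAX_SUBSPACES 4

    struct ctx { int refcount; };

    static struct ctx *table[MAX_SUBSPACES];

    static void put_ctx(int id)
    {
        if (!table[id])
            return;
        if (--table[id]->refcount == 0) {
            free(table[id]);
            table[id] = NULL;   /* without this, the next lookup would touch freed memory */
        }
    }

    int main(void)
    {
        table[0] = calloc(1, sizeof(struct ctx));
        table[0]->refcount = 1;

        put_ctx(0);
        printf("slot 0 is %s\n", table[0] ? "still set" : "cleared");
        return 0;
    }
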
index 8f9a28a870b0c5a4315c683abe7a67b0da28a664..8b0de8a3c64709652ae327b8d19b9d0d141d856a 100644 (file)
@@ -403,7 +403,7 @@ static int __init hmat_parse_proximity_domain(union acpi_subtable_headers *heade
                pr_info("HMAT: Memory Flags:%04x Processor Domain:%d Memory Domain:%d\n",
                        p->flags, p->processor_PD, p->memory_PD);
 
-       if (p->flags & ACPI_HMAT_MEMORY_PD_VALID) {
+       if (p->flags & ACPI_HMAT_MEMORY_PD_VALID && hmat_revision == 1) {
                target = find_mem_target(p->memory_PD);
                if (!target) {
                        pr_debug("HMAT: Memory Domain missing from SRAT\n");
index 1413324982f0d7628068711e61e598093b88d320..14e68f202f8109dfce44d19eee7274d68d1552bc 100644 (file)
@@ -1322,7 +1322,7 @@ static ssize_t scrub_show(struct device *dev,
        nfit_device_lock(dev);
        nd_desc = dev_get_drvdata(dev);
        if (!nd_desc) {
-               device_unlock(dev);
+               nfit_device_unlock(dev);
                return rc;
        }
        acpi_desc = to_acpi_desc(nd_desc);
index 08da9c29f1e9a8fe722e5bfc7895357b5ec3277e..62114a03a51aa134dbf5254f105392c60c27a550 100644 (file)
@@ -290,14 +290,13 @@ static int acpi_processor_notifier(struct notifier_block *nb,
                                   unsigned long event, void *data)
 {
        struct cpufreq_policy *policy = data;
-       int cpu = policy->cpu;
 
        if (event == CPUFREQ_CREATE_POLICY) {
-               acpi_thermal_cpufreq_init(cpu);
-               acpi_processor_ppc_init(cpu);
+               acpi_thermal_cpufreq_init(policy);
+               acpi_processor_ppc_init(policy);
        } else if (event == CPUFREQ_REMOVE_POLICY) {
-               acpi_processor_ppc_exit(cpu);
-               acpi_thermal_cpufreq_exit(cpu);
+               acpi_processor_ppc_exit(policy);
+               acpi_thermal_cpufreq_exit(policy);
        }
 
        return 0;
index 2261713d1aec08ffe2f006ff8fd69c165b32a6de..5909e8fa4013f2c6c831b1a28d9d69ca4cec98fb 100644 (file)
@@ -81,10 +81,10 @@ static int acpi_processor_get_platform_limit(struct acpi_processor *pr)
        pr->performance_platform_limit = (int)ppc;
 
        if (ppc >= pr->performance->state_count ||
-           unlikely(!dev_pm_qos_request_active(&pr->perflib_req)))
+           unlikely(!freq_qos_request_active(&pr->perflib_req)))
                return 0;
 
-       ret = dev_pm_qos_update_request(&pr->perflib_req,
+       ret = freq_qos_update_request(&pr->perflib_req,
                        pr->performance->states[ppc].core_frequency * 1000);
        if (ret < 0) {
                pr_warn("Failed to update perflib freq constraint: CPU%d (%d)\n",
@@ -157,26 +157,36 @@ void acpi_processor_ignore_ppc_init(void)
                ignore_ppc = 0;
 }
 
-void acpi_processor_ppc_init(int cpu)
+void acpi_processor_ppc_init(struct cpufreq_policy *policy)
 {
-       struct acpi_processor *pr = per_cpu(processors, cpu);
-       int ret;
+       unsigned int cpu;
 
-       ret = dev_pm_qos_add_request(get_cpu_device(cpu),
-                                    &pr->perflib_req, DEV_PM_QOS_MAX_FREQUENCY,
-                                    INT_MAX);
-       if (ret < 0) {
-               pr_err("Failed to add freq constraint for CPU%d (%d)\n", cpu,
-                      ret);
-               return;
+       for_each_cpu(cpu, policy->related_cpus) {
+               struct acpi_processor *pr = per_cpu(processors, cpu);
+               int ret;
+
+               if (!pr)
+                       continue;
+
+               ret = freq_qos_add_request(&policy->constraints,
+                                          &pr->perflib_req,
+                                          FREQ_QOS_MAX, INT_MAX);
+               if (ret < 0)
+                       pr_err("Failed to add freq constraint for CPU%d (%d)\n",
+                              cpu, ret);
        }
 }
 
-void acpi_processor_ppc_exit(int cpu)
+void acpi_processor_ppc_exit(struct cpufreq_policy *policy)
 {
-       struct acpi_processor *pr = per_cpu(processors, cpu);
+       unsigned int cpu;
 
-       dev_pm_qos_remove_request(&pr->perflib_req);
+       for_each_cpu(cpu, policy->related_cpus) {
+               struct acpi_processor *pr = per_cpu(processors, cpu);
+
+               if (pr)
+                       freq_qos_remove_request(&pr->perflib_req);
+       }
 }
 
 static int acpi_processor_get_performance_control(struct acpi_processor *pr)
index ec2638f1df4faea14e10a0682887bdce2d3e91e1..41feb88ee92d69380af6e4d30c92199e3e4dd4d1 100644 (file)
@@ -105,7 +105,7 @@ static int cpufreq_set_cur_state(unsigned int cpu, int state)
 
                pr = per_cpu(processors, i);
 
-               if (unlikely(!dev_pm_qos_request_active(&pr->thermal_req)))
+               if (unlikely(!freq_qos_request_active(&pr->thermal_req)))
                        continue;
 
                policy = cpufreq_cpu_get(i);
@@ -116,7 +116,7 @@ static int cpufreq_set_cur_state(unsigned int cpu, int state)
 
                cpufreq_cpu_put(policy);
 
-               ret = dev_pm_qos_update_request(&pr->thermal_req, max_freq);
+               ret = freq_qos_update_request(&pr->thermal_req, max_freq);
                if (ret < 0) {
                        pr_warn("Failed to update thermal freq constraint: CPU%d (%d)\n",
                                pr->id, ret);
@@ -125,26 +125,36 @@ static int cpufreq_set_cur_state(unsigned int cpu, int state)
        return 0;
 }
 
-void acpi_thermal_cpufreq_init(int cpu)
+void acpi_thermal_cpufreq_init(struct cpufreq_policy *policy)
 {
-       struct acpi_processor *pr = per_cpu(processors, cpu);
-       int ret;
-
-       ret = dev_pm_qos_add_request(get_cpu_device(cpu),
-                                    &pr->thermal_req, DEV_PM_QOS_MAX_FREQUENCY,
-                                    INT_MAX);
-       if (ret < 0) {
-               pr_err("Failed to add freq constraint for CPU%d (%d)\n", cpu,
-                      ret);
-               return;
+       unsigned int cpu;
+
+       for_each_cpu(cpu, policy->related_cpus) {
+               struct acpi_processor *pr = per_cpu(processors, cpu);
+               int ret;
+
+               if (!pr)
+                       continue;
+
+               ret = freq_qos_add_request(&policy->constraints,
+                                          &pr->thermal_req,
+                                          FREQ_QOS_MAX, INT_MAX);
+               if (ret < 0)
+                       pr_err("Failed to add freq constraint for CPU%d (%d)\n",
+                              cpu, ret);
        }
 }
 
-void acpi_thermal_cpufreq_exit(int cpu)
+void acpi_thermal_cpufreq_exit(struct cpufreq_policy *policy)
 {
-       struct acpi_processor *pr = per_cpu(processors, cpu);
+       unsigned int cpu;
+
+       for_each_cpu(cpu, policy->related_cpus) {
+               struct acpi_processor *pr = per_cpu(processors, cpu);
 
-       dev_pm_qos_remove_request(&pr->thermal_req);
+               if (pr)
+                       freq_qos_remove_request(&pr->thermal_req);
+       }
 }
 #else                          /* ! CONFIG_CPU_FREQ */
 static int cpufreq_get_max_state(unsigned int cpu)
index 9fa77d72ef27f52369a23f9ed99788705661bc3a..2af937a8b1c5c8b5b621f6e75f7206feab9fe996 100644 (file)
@@ -361,19 +361,6 @@ static const struct dmi_system_id acpisleep_dmi_table[] __initconst = {
                DMI_MATCH(DMI_PRODUCT_NAME, "80E3"),
                },
        },
-       /*
-        * https://bugzilla.kernel.org/show_bug.cgi?id=196907
-        * Some Dell XPS13 9360 cannot do suspend-to-idle using the Low Power
-        * S0 Idle firmware interface.
-        */
-       {
-       .callback = init_default_s3,
-       .ident = "Dell XPS13 9360",
-       .matches = {
-               DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
-               DMI_MATCH(DMI_PRODUCT_NAME, "XPS 13 9360"),
-               },
-       },
        /*
         * ThinkPad X1 Tablet(2016) cannot do suspend-to-idle using
         * the Low Power S0 Idle firmware interface (see
index f39f075abff9f71bac1f688db5483017957ad88c..fe1523664816a49712c88b4afcdc9ee65011b955 100644 (file)
@@ -409,9 +409,11 @@ static int amba_device_try_add(struct amba_device *dev, struct resource *parent)
                 */
                rstc = of_reset_control_array_get_optional_shared(dev->dev.of_node);
                if (IS_ERR(rstc)) {
-                       if (PTR_ERR(rstc) != -EPROBE_DEFER)
-                               dev_err(&dev->dev, "Can't get amba reset!\n");
-                       return PTR_ERR(rstc);
+                       ret = PTR_ERR(rstc);
+                       if (ret != -EPROBE_DEFER)
+                               dev_err(&dev->dev, "can't get reset: %d\n",
+                                       ret);
+                       goto err_reset;
                }
                reset_control_deassert(rstc);
                reset_control_put(rstc);
@@ -472,6 +474,12 @@ static int amba_device_try_add(struct amba_device *dev, struct resource *parent)
        release_resource(&dev->res);
  err_out:
        return ret;
+
+ err_reset:
+       amba_put_disable_pclk(dev);
+       iounmap(tmp);
+       dev_pm_domain_detach(&dev->dev, true);
+       goto err_release;
 }
 
 /*
index c0a491277acac5a6310d537de3af555f8ff71579..265d9dd46a5ebb6610bf826ed0fa620b55c289a6 100644 (file)
@@ -57,6 +57,7 @@
 #include <linux/sched/signal.h>
 #include <linux/sched/mm.h>
 #include <linux/seq_file.h>
+#include <linux/string.h>
 #include <linux/uaccess.h>
 #include <linux/pid_namespace.h>
 #include <linux/security.h>
@@ -66,6 +67,7 @@
 #include <linux/task_work.h>
 
 #include <uapi/linux/android/binder.h>
+#include <uapi/linux/android/binderfs.h>
 
 #include <asm/cacheflush.h>
 
@@ -95,10 +97,6 @@ DEFINE_SHOW_ATTRIBUTE(proc);
 #define SZ_1K                               0x400
 #endif
 
-#ifndef SZ_4M
-#define SZ_4M                               0x400000
-#endif
-
 #define FORBIDDEN_MMAP_FLAGS                (VM_WRITE)
 
 enum {
@@ -2876,7 +2874,7 @@ static void binder_transaction(struct binder_proc *proc,
        e->target_handle = tr->target.handle;
        e->data_size = tr->data_size;
        e->offsets_size = tr->offsets_size;
-       e->context_name = proc->context->name;
+       strscpy(e->context_name, proc->context->name, BINDERFS_MAX_NAME);
 
        if (reply) {
                binder_inner_proc_lock(proc);
@@ -5175,9 +5173,6 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
        if (proc->tsk != current->group_leader)
                return -EINVAL;
 
-       if ((vma->vm_end - vma->vm_start) > SZ_4M)
-               vma->vm_end = vma->vm_start + SZ_4M;
-
        binder_debug(BINDER_DEBUG_OPEN_CLOSE,
                     "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
                     __func__, proc->pid, vma->vm_start, vma->vm_end,
index 6d79a1b0d44630f79ac16df744e2e9d758165e2f..eb76a823fbb200f008b0e4c6ad02bc5818929857 100644 (file)
@@ -22,6 +22,7 @@
 #include <asm/cacheflush.h>
 #include <linux/uaccess.h>
 #include <linux/highmem.h>
+#include <linux/sizes.h>
 #include "binder_alloc.h"
 #include "binder_trace.h"
 
@@ -156,7 +157,7 @@ static struct binder_buffer *binder_alloc_prepare_to_free_locked(
 }
 
 /**
- * binder_alloc_buffer_lookup() - get buffer given user ptr
+ * binder_alloc_prepare_to_free() - get buffer given user ptr
  * @alloc:     binder_alloc for this proc
  * @user_ptr:  User pointer to buffer data
  *
@@ -689,7 +690,9 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
        alloc->buffer = (void __user *)vma->vm_start;
        mutex_unlock(&binder_alloc_mmap_lock);
 
-       alloc->pages = kcalloc((vma->vm_end - vma->vm_start) / PAGE_SIZE,
+       alloc->buffer_size = min_t(unsigned long, vma->vm_end - vma->vm_start,
+                                  SZ_4M);
+       alloc->pages = kcalloc(alloc->buffer_size / PAGE_SIZE,
                               sizeof(alloc->pages[0]),
                               GFP_KERNEL);
        if (alloc->pages == NULL) {
@@ -697,7 +700,6 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
                failure_string = "alloc page array";
                goto err_alloc_pages_failed;
        }
-       alloc->buffer_size = vma->vm_end - vma->vm_start;
 
        buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
        if (!buffer) {
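
With the mmap size cap moved out of binder_mmap(), binder_alloc_mmap_handler() now derives buffer_size by clamping the VMA length to 4 MiB before sizing the page array, so the two values can no longer disagree. A tiny user-space sketch of the same clamp-then-size pattern, with hypothetical values (min_t in the kernel is just a type-checked minimum):

    #include <stddef.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    #define SZ_4M     (4UL * 1024 * 1024)

    static size_t min_size(size_t a, size_t b) { return a < b ? a : b; }

    int main(void)
    {
        size_t vma_len = 8UL * 1024 * 1024;              /* caller asked for 8 MiB */
        size_t buffer_size = min_size(vma_len, SZ_4M);   /* clamp first ... */
        size_t nr_pages = buffer_size / PAGE_SIZE;       /* ... then size the page array */

        printf("buffer_size=%zu nr_pages=%zu\n", buffer_size, nr_pages);
        return 0;
    }
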
index bd47f7f72075a9676e84380d7c2f9f875878ebca..ae991097d14d17068f1512281704b0f04300c7d0 100644 (file)
@@ -130,7 +130,7 @@ struct binder_transaction_log_entry {
        int return_error_line;
        uint32_t return_error;
        uint32_t return_error_param;
-       const char *context_name;
+       char context_name[BINDERFS_MAX_NAME + 1];
 };
 
 struct binder_transaction_log {
index dd92faf197d5ed81a8a7c0fcbc0d39a73be5b977..05c2b32dcc4dcd97a15d7ffb0ea4896caf54c5b3 100644 (file)
@@ -1600,7 +1600,9 @@ static void ahci_intel_pcs_quirk(struct pci_dev *pdev, struct ahci_host_priv *hp
         */
        if (!id || id->vendor != PCI_VENDOR_ID_INTEL)
                return;
-       if (((enum board_ids) id->driver_data) < board_ahci_pcs7)
+
+       /* Skip applying the quirk on Denverton and beyond */
+       if (((enum board_ids) id->driver_data) >= board_ahci_pcs7)
                return;
 
        /*
index e742780950de84c330c2c83b1daf1a0905f03ed0..8befce036af84810b0a3c815829b87f6fd904d8c 100644 (file)
@@ -153,17 +153,13 @@ int ahci_platform_enable_regulators(struct ahci_host_priv *hpriv)
 {
        int rc, i;
 
-       if (hpriv->ahci_regulator) {
-               rc = regulator_enable(hpriv->ahci_regulator);
-               if (rc)
-                       return rc;
-       }
+       rc = regulator_enable(hpriv->ahci_regulator);
+       if (rc)
+               return rc;
 
-       if (hpriv->phy_regulator) {
-               rc = regulator_enable(hpriv->phy_regulator);
-               if (rc)
-                       goto disable_ahci_pwrs;
-       }
+       rc = regulator_enable(hpriv->phy_regulator);
+       if (rc)
+               goto disable_ahci_pwrs;
 
        for (i = 0; i < hpriv->nports; i++) {
                if (!hpriv->target_pwrs[i])
@@ -181,11 +177,9 @@ disable_target_pwrs:
                if (hpriv->target_pwrs[i])
                        regulator_disable(hpriv->target_pwrs[i]);
 
-       if (hpriv->phy_regulator)
-               regulator_disable(hpriv->phy_regulator);
+       regulator_disable(hpriv->phy_regulator);
 disable_ahci_pwrs:
-       if (hpriv->ahci_regulator)
-               regulator_disable(hpriv->ahci_regulator);
+       regulator_disable(hpriv->ahci_regulator);
        return rc;
 }
 EXPORT_SYMBOL_GPL(ahci_platform_enable_regulators);
@@ -207,10 +201,8 @@ void ahci_platform_disable_regulators(struct ahci_host_priv *hpriv)
                regulator_disable(hpriv->target_pwrs[i]);
        }
 
-       if (hpriv->ahci_regulator)
-               regulator_disable(hpriv->ahci_regulator);
-       if (hpriv->phy_regulator)
-               regulator_disable(hpriv->phy_regulator);
+       regulator_disable(hpriv->ahci_regulator);
+       regulator_disable(hpriv->phy_regulator);
 }
 EXPORT_SYMBOL_GPL(ahci_platform_disable_regulators);
 /**
@@ -359,7 +351,7 @@ static int ahci_platform_get_regulator(struct ahci_host_priv *hpriv, u32 port,
        struct regulator *target_pwr;
        int rc = 0;
 
-       target_pwr = regulator_get_optional(dev, "target");
+       target_pwr = regulator_get(dev, "target");
 
        if (!IS_ERR(target_pwr))
                hpriv->target_pwrs[port] = target_pwr;
@@ -436,16 +428,14 @@ struct ahci_host_priv *ahci_platform_get_resources(struct platform_device *pdev,
                hpriv->clks[i] = clk;
        }
 
-       hpriv->ahci_regulator = devm_regulator_get_optional(dev, "ahci");
+       hpriv->ahci_regulator = devm_regulator_get(dev, "ahci");
        if (IS_ERR(hpriv->ahci_regulator)) {
                rc = PTR_ERR(hpriv->ahci_regulator);
-               if (rc == -EPROBE_DEFER)
+               if (rc != 0)
                        goto err_out;
-               rc = 0;
-               hpriv->ahci_regulator = NULL;
        }
 
-       hpriv->phy_regulator = devm_regulator_get_optional(dev, "phy");
+       hpriv->phy_regulator = devm_regulator_get(dev, "phy");
        if (IS_ERR(hpriv->phy_regulator)) {
                rc = PTR_ERR(hpriv->phy_regulator);
                if (rc == -EPROBE_DEFER)
index 76d0f9de767bcec3d19b59a390cf95d9954c4025..58e09ffe8b9cbe034f066d22747446e25d10d3f3 100644 (file)
@@ -4791,27 +4791,6 @@ void ata_scsi_hotplug(struct work_struct *work)
                return;
        }
 
-       /*
-        * XXX - UGLY HACK
-        *
-        * The block layer suspend/resume path is fundamentally broken due
-        * to freezable kthreads and workqueue and may deadlock if a block
-        * device gets removed while resume is in progress.  I don't know
-        * what the solution is short of removing freezable kthreads and
-        * workqueues altogether.
-        *
-        * The following is an ugly hack to avoid kicking off device
-        * removal while freezer is active.  This is a joke but does avoid
-        * this particular deadlock scenario.
-        *
-        * https://bugzilla.kernel.org/show_bug.cgi?id=62801
-        * http://marc.info/?l=linux-kernel&m=138695698516487
-        */
-#ifdef CONFIG_FREEZER
-       while (pm_freezing)
-               msleep(10);
-#endif
-
        DPRINTK("ENTER\n");
        mutex_lock(&ap->scsi_scan_mutex);
 
index 2db62d98e39505b3466275d41bbb2c5a2cc9017e..7bd9cd366d41193638c858076755eec4ab1c67e6 100644 (file)
@@ -9,6 +9,7 @@
  */
 
 #include <linux/acpi.h>
+#include <linux/cpufreq.h>
 #include <linux/device.h>
 #include <linux/err.h>
 #include <linux/fwnode.h>
@@ -3179,6 +3180,8 @@ void device_shutdown(void)
        wait_for_device_probe();
        device_block_probing();
 
+       cpufreq_suspend();
+
        spin_lock(&devices_kset->list_lock);
        /*
         * Walk the devices list backward, shutting down each in turn.
index 6bea4f3f8040dd77c87cf34647b26515061a76a1..55907c27075b13295c05ed197fa7f087546452d1 100644 (file)
@@ -540,6 +540,9 @@ static ssize_t soft_offline_page_store(struct device *dev,
        pfn >>= PAGE_SHIFT;
        if (!pfn_valid(pfn))
                return -ENXIO;
+       /* Only online pages can be soft-offlined (esp., not ZONE_DEVICE). */
+       if (!pfn_to_online_page(pfn))
+               return -EIO;
        ret = soft_offline_page(pfn_to_page(pfn), 0);
        return ret == 0 ? count : ret;
 }
index b6c6c7d97d5b2aa388baa153550ed7a4db3af022..b230beb6ccb41ba8beb5fbaf87972f69b94a33e3 100644 (file)
@@ -241,12 +241,8 @@ struct resource *platform_get_resource_byname(struct platform_device *dev,
 }
 EXPORT_SYMBOL_GPL(platform_get_resource_byname);
 
-/**
- * platform_get_irq_byname - get an IRQ for a device by name
- * @dev: platform device
- * @name: IRQ name
- */
-int platform_get_irq_byname(struct platform_device *dev, const char *name)
+static int __platform_get_irq_byname(struct platform_device *dev,
+                                    const char *name)
 {
        struct resource *r;
 
@@ -262,11 +258,47 @@ int platform_get_irq_byname(struct platform_device *dev, const char *name)
        if (r)
                return r->start;
 
-       dev_err(&dev->dev, "IRQ %s not found\n", name);
        return -ENXIO;
 }
+
+/**
+ * platform_get_irq_byname - get an IRQ for a device by name
+ * @dev: platform device
+ * @name: IRQ name
+ *
+ * Get an IRQ like platform_get_irq(), but then by name rather than by index.
+ *
+ * Return: IRQ number on success, negative error number on failure.
+ */
+int platform_get_irq_byname(struct platform_device *dev, const char *name)
+{
+       int ret;
+
+       ret = __platform_get_irq_byname(dev, name);
+       if (ret < 0 && ret != -EPROBE_DEFER)
+               dev_err(&dev->dev, "IRQ %s not found\n", name);
+
+       return ret;
+}
 EXPORT_SYMBOL_GPL(platform_get_irq_byname);
 
+/**
+ * platform_get_irq_byname_optional - get an optional IRQ for a device by name
+ * @dev: platform device
+ * @name: IRQ name
+ *
+ * Get an optional IRQ by name like platform_get_irq_byname(). Except that it
+ * does not print an error message if an IRQ can not be obtained.
+ *
+ * Return: IRQ number on success, negative error number on failure.
+ */
+int platform_get_irq_byname_optional(struct platform_device *dev,
+                                    const char *name)
+{
+       return __platform_get_irq_byname(dev, name);
+}
+EXPORT_SYMBOL_GPL(platform_get_irq_byname_optional);
+
 /**
  * platform_add_devices - add a numbers of platform devices
  * @devs: array of platform devices to add
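
platform_get_irq_byname_optional() lets a driver probe for an IRQ that may legitimately be absent without spamming the log, while platform_get_irq_byname() keeps the error message for the mandatory case. A hedged, kernel-context sketch of how a probe routine might use the two; the device, IRQ names and probe function are hypothetical and this is not a buildable driver on its own:

    #include <linux/errno.h>
    #include <linux/platform_device.h>

    static int example_probe(struct platform_device *pdev)
    {
        int main_irq, wake_irq;

        /* Mandatory IRQ: failure is reported by the core with dev_err(). */
        main_irq = platform_get_irq_byname(pdev, "main");
        if (main_irq < 0)
            return main_irq;

        /* Optional wakeup IRQ: absence is normal, so nothing is logged. */
        wake_irq = platform_get_irq_byname_optional(pdev, "wakeup");
        if (wake_irq == -EPROBE_DEFER)
            return wake_irq;
        if (wake_irq < 0)
            wake_irq = 0;   /* carry on without wakeup support */

        dev_info(&pdev->dev, "main irq %d, wake irq %d\n", main_irq, wake_irq);
        return 0;
    }
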
index 6c90fd7e2ff8f385a7c00fa6a36bf4d3a344b02d..350dcafd751f8e6c352f5e68945103da7cd84227 100644 (file)
@@ -115,20 +115,10 @@ s32 dev_pm_qos_read_value(struct device *dev, enum dev_pm_qos_req_type type)
 
        spin_lock_irqsave(&dev->power.lock, flags);
 
-       switch (type) {
-       case DEV_PM_QOS_RESUME_LATENCY:
+       if (type == DEV_PM_QOS_RESUME_LATENCY) {
                ret = IS_ERR_OR_NULL(qos) ? PM_QOS_RESUME_LATENCY_NO_CONSTRAINT
                        : pm_qos_read_value(&qos->resume_latency);
-               break;
-       case DEV_PM_QOS_MIN_FREQUENCY:
-               ret = IS_ERR_OR_NULL(qos) ? PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE
-                       : pm_qos_read_value(&qos->min_frequency);
-               break;
-       case DEV_PM_QOS_MAX_FREQUENCY:
-               ret = IS_ERR_OR_NULL(qos) ? PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE
-                       : pm_qos_read_value(&qos->max_frequency);
-               break;
-       default:
+       } else {
                WARN_ON(1);
                ret = 0;
        }
@@ -169,14 +159,6 @@ static int apply_constraint(struct dev_pm_qos_request *req,
                        req->dev->power.set_latency_tolerance(req->dev, value);
                }
                break;
-       case DEV_PM_QOS_MIN_FREQUENCY:
-               ret = pm_qos_update_target(&qos->min_frequency,
-                                          &req->data.pnode, action, value);
-               break;
-       case DEV_PM_QOS_MAX_FREQUENCY:
-               ret = pm_qos_update_target(&qos->max_frequency,
-                                          &req->data.pnode, action, value);
-               break;
        case DEV_PM_QOS_FLAGS:
                ret = pm_qos_update_flags(&qos->flags, &req->data.flr,
                                          action, value);
@@ -227,24 +209,6 @@ static int dev_pm_qos_constraints_allocate(struct device *dev)
        c->no_constraint_value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT;
        c->type = PM_QOS_MIN;
 
-       c = &qos->min_frequency;
-       plist_head_init(&c->list);
-       c->target_value = PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE;
-       c->default_value = PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE;
-       c->no_constraint_value = PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE;
-       c->type = PM_QOS_MAX;
-       c->notifiers = ++n;
-       BLOCKING_INIT_NOTIFIER_HEAD(n);
-
-       c = &qos->max_frequency;
-       plist_head_init(&c->list);
-       c->target_value = PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE;
-       c->default_value = PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE;
-       c->no_constraint_value = PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE;
-       c->type = PM_QOS_MIN;
-       c->notifiers = ++n;
-       BLOCKING_INIT_NOTIFIER_HEAD(n);
-
        INIT_LIST_HEAD(&qos->flags.list);
 
        spin_lock_irq(&dev->power.lock);
@@ -305,18 +269,6 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
                memset(req, 0, sizeof(*req));
        }
 
-       c = &qos->min_frequency;
-       plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
-               apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE);
-               memset(req, 0, sizeof(*req));
-       }
-
-       c = &qos->max_frequency;
-       plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
-               apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE);
-               memset(req, 0, sizeof(*req));
-       }
-
        f = &qos->flags;
        list_for_each_entry_safe(req, tmp, &f->list, data.flr.node) {
                apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
@@ -428,8 +380,6 @@ static int __dev_pm_qos_update_request(struct dev_pm_qos_request *req,
        switch(req->type) {
        case DEV_PM_QOS_RESUME_LATENCY:
        case DEV_PM_QOS_LATENCY_TOLERANCE:
-       case DEV_PM_QOS_MIN_FREQUENCY:
-       case DEV_PM_QOS_MAX_FREQUENCY:
                curr_value = req->data.pnode.prio;
                break;
        case DEV_PM_QOS_FLAGS:
@@ -557,14 +507,6 @@ int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier,
                ret = blocking_notifier_chain_register(dev->power.qos->resume_latency.notifiers,
                                                       notifier);
                break;
-       case DEV_PM_QOS_MIN_FREQUENCY:
-               ret = blocking_notifier_chain_register(dev->power.qos->min_frequency.notifiers,
-                                                      notifier);
-               break;
-       case DEV_PM_QOS_MAX_FREQUENCY:
-               ret = blocking_notifier_chain_register(dev->power.qos->max_frequency.notifiers,
-                                                      notifier);
-               break;
        default:
                WARN_ON(1);
                ret = -EINVAL;
@@ -604,14 +546,6 @@ int dev_pm_qos_remove_notifier(struct device *dev,
                ret = blocking_notifier_chain_unregister(dev->power.qos->resume_latency.notifiers,
                                                         notifier);
                break;
-       case DEV_PM_QOS_MIN_FREQUENCY:
-               ret = blocking_notifier_chain_unregister(dev->power.qos->min_frequency.notifiers,
-                                                        notifier);
-               break;
-       case DEV_PM_QOS_MAX_FREQUENCY:
-               ret = blocking_notifier_chain_unregister(dev->power.qos->max_frequency.notifiers,
-                                                        notifier);
-               break;
        default:
                WARN_ON(1);
                ret = -EINVAL;
index 1410fa89365389a62f42782b3ef89dfb23fa1600..f6f77eaa7217e0df51b51047ca3c734a001d0479 100644 (file)
@@ -994,6 +994,16 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
        if (!(lo_flags & LO_FLAGS_READ_ONLY) && file->f_op->fsync)
                blk_queue_write_cache(lo->lo_queue, true, false);
 
+       if (io_is_direct(lo->lo_backing_file) && inode->i_sb->s_bdev) {
+               /* In case of direct I/O, match underlying block size */
+               unsigned short bsize = bdev_logical_block_size(
+                       inode->i_sb->s_bdev);
+
+               blk_queue_logical_block_size(lo->lo_queue, bsize);
+               blk_queue_physical_block_size(lo->lo_queue, bsize);
+               blk_queue_io_min(lo->lo_queue, bsize);
+       }
+
        loop_update_rotational(lo);
        loop_update_dio(lo);
        set_capacity(lo->lo_disk, size);
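
The loop change above propagates the backing device's logical block size to the loop queue when direct I/O is in use, since O_DIRECT requests must be aligned to the underlying device's sector size. From user space the same value can be read back with the BLKSSZGET ioctl; a small sketch, where the device path is only an example:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/fs.h>

    int main(void)
    {
        int fd = open("/dev/loop0", O_RDONLY);   /* example device */
        int lbs = 0;

        if (fd < 0) {
            perror("open");
            return 1;
        }
        if (ioctl(fd, BLKSSZGET, &lbs) < 0) {
            perror("BLKSSZGET");
            close(fd);
            return 1;
        }
        printf("logical block size: %d bytes\n", lbs);
        close(fd);
        return 0;
    }
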
index ac07e8c94c79ba7814d8c331325a18eb29d65490..a94ee45440b3b6443ab9921d6a7e2e250e9146bf 100644 (file)
@@ -248,8 +248,8 @@ static void nbd_put(struct nbd_device *nbd)
        if (refcount_dec_and_mutex_lock(&nbd->refs,
                                        &nbd_index_mutex)) {
                idr_remove(&nbd_index_idr, nbd->index);
-               mutex_unlock(&nbd_index_mutex);
                nbd_dev_remove(nbd);
+               mutex_unlock(&nbd_index_mutex);
        }
 }
 
@@ -385,17 +385,16 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
        struct nbd_device *nbd = cmd->nbd;
        struct nbd_config *config;
 
+       if (!mutex_trylock(&cmd->lock))
+               return BLK_EH_RESET_TIMER;
+
        if (!refcount_inc_not_zero(&nbd->config_refs)) {
                cmd->status = BLK_STS_TIMEOUT;
+               mutex_unlock(&cmd->lock);
                goto done;
        }
        config = nbd->config;
 
-       if (!mutex_trylock(&cmd->lock)) {
-               nbd_config_put(nbd);
-               return BLK_EH_RESET_TIMER;
-       }
-
        if (config->num_connections > 1) {
                dev_err_ratelimited(nbd_to_dev(nbd),
                                    "Connection timed out, retrying (%d/%d alive)\n",
@@ -711,6 +710,12 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
                ret = -ENOENT;
                goto out;
        }
+       if (cmd->status != BLK_STS_OK) {
+               dev_err(disk_to_dev(nbd->disk), "Command already handled %p\n",
+                       req);
+               ret = -ENOENT;
+               goto out;
+       }
        if (test_bit(NBD_CMD_REQUEUED, &cmd->flags)) {
                dev_err(disk_to_dev(nbd->disk), "Raced with timeout on req %p\n",
                        req);
@@ -792,7 +797,10 @@ static bool nbd_clear_req(struct request *req, void *data, bool reserved)
 {
        struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
 
+       mutex_lock(&cmd->lock);
        cmd->status = BLK_STS_IOERR;
+       mutex_unlock(&cmd->lock);
+
        blk_mq_complete_request(req);
        return true;
 }
@@ -972,6 +980,25 @@ static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
        return ret;
 }
 
+static struct socket *nbd_get_socket(struct nbd_device *nbd, unsigned long fd,
+                                    int *err)
+{
+       struct socket *sock;
+
+       *err = 0;
+       sock = sockfd_lookup(fd, err);
+       if (!sock)
+               return NULL;
+
+       if (sock->ops->shutdown == sock_no_shutdown) {
+               dev_err(disk_to_dev(nbd->disk), "Unsupported socket: shutdown callout must be supported.\n");
+               *err = -EINVAL;
+               return NULL;
+       }
+
+       return sock;
+}
+
 static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
                          bool netlink)
 {
@@ -981,7 +1008,7 @@ static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
        struct nbd_sock *nsock;
        int err;
 
-       sock = sockfd_lookup(arg, &err);
+       sock = nbd_get_socket(nbd, arg, &err);
        if (!sock)
                return err;
 
@@ -1033,7 +1060,7 @@ static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg)
        int i;
        int err;
 
-       sock = sockfd_lookup(arg, &err);
+       sock = nbd_get_socket(nbd, arg, &err);
        if (!sock)
                return err;
 
index eabc116832a7ea47f22d4634926b521ce7206002..3d7fdea872f88d2f7337b66bafdce2a3c5051417 100644 (file)
@@ -142,8 +142,7 @@ static blk_status_t null_zone_reset(struct nullb_cmd *cmd, sector_t sector)
                zone->wp = zone->start;
                break;
        default:
-               cmd->error = BLK_STS_NOTSUPP;
-               break;
+               return BLK_STS_NOTSUPP;
        }
        return BLK_STS_OK;
 }
index 7c4350c0fb77e6ba5366bf4455ecb8f410484499..39136675dae5b381241a3ae2acce1cd93e2cfa39 100644 (file)
@@ -6639,10 +6639,13 @@ static int rbd_add_acquire_lock(struct rbd_device *rbd_dev)
        queue_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0);
        ret = wait_for_completion_killable_timeout(&rbd_dev->acquire_wait,
                            ceph_timeout_jiffies(rbd_dev->opts->lock_timeout));
-       if (ret > 0)
+       if (ret > 0) {
                ret = rbd_dev->acquire_err;
-       else if (!ret)
-               ret = -ETIMEDOUT;
+       } else {
+               cancel_delayed_work_sync(&rbd_dev->lock_dwork);
+               if (!ret)
+                       ret = -ETIMEDOUT;
+       }
 
        if (ret) {
                rbd_warn(rbd_dev, "failed to acquire exclusive lock: %ld", ret);
index d58a359a66225f39682c067739eb9843bae36b80..4285e75e52c3424b7aa2063cd202509647c5d20a 100644 (file)
@@ -413,13 +413,14 @@ static void reset_bdev(struct zram *zram)
 static ssize_t backing_dev_show(struct device *dev,
                struct device_attribute *attr, char *buf)
 {
+       struct file *file;
        struct zram *zram = dev_to_zram(dev);
-       struct file *file = zram->backing_dev;
        char *p;
        ssize_t ret;
 
        down_read(&zram->init_lock);
-       if (!zram->backing_dev) {
+       file = zram->backing_dev;
+       if (!file) {
                memcpy(buf, "none\n", 5);
                up_read(&zram->init_lock);
                return 5;
index ad50efb470aaa296bdc9cae1ba7a3ea321ddcf1c..2b6670daf7fc70cd58ac287785d06990bf1cb61e 100644 (file)
@@ -74,6 +74,7 @@ static const char * const clock_names[SYSC_MAX_CLOCKS] = {
  * @clk_disable_quirk: module specific clock disable quirk
  * @reset_done_quirk: module specific reset done quirk
  * @module_enable_quirk: module specific enable quirk
+ * @module_disable_quirk: module specific disable quirk
  */
 struct sysc {
        struct device *dev;
@@ -100,6 +101,7 @@ struct sysc {
        void (*clk_disable_quirk)(struct sysc *sysc);
        void (*reset_done_quirk)(struct sysc *sysc);
        void (*module_enable_quirk)(struct sysc *sysc);
+       void (*module_disable_quirk)(struct sysc *sysc);
 };
 
 static void sysc_parse_dts_quirks(struct sysc *ddata, struct device_node *np,
@@ -959,6 +961,9 @@ static int sysc_disable_module(struct device *dev)
        if (ddata->offsets[SYSC_SYSCONFIG] == -ENODEV)
                return 0;
 
+       if (ddata->module_disable_quirk)
+               ddata->module_disable_quirk(ddata);
+
        regbits = ddata->cap->regbits;
        reg = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);
 
@@ -1248,6 +1253,9 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
                   SYSC_MODULE_QUIRK_SGX),
        SYSC_QUIRK("wdt", 0, 0, 0x10, 0x14, 0x502a0500, 0xfffff0f0,
                   SYSC_MODULE_QUIRK_WDT),
+       /* Watchdog on am3 and am4 */
+       SYSC_QUIRK("wdt", 0x44e35000, 0, 0x10, 0x14, 0x502a0500, 0xfffff0f0,
+                  SYSC_MODULE_QUIRK_WDT | SYSC_QUIRK_SWSUP_SIDLE),
 
 #ifdef DEBUG
        SYSC_QUIRK("adc", 0, 0, 0x10, -1, 0x47300001, 0xffffffff, 0),
@@ -1440,14 +1448,14 @@ static void sysc_reset_done_quirk_wdt(struct sysc *ddata)
                                   !(val & 0x10), 100,
                                   MAX_MODULE_SOFTRESET_WAIT);
        if (error)
-               dev_warn(ddata->dev, "wdt disable spr failed\n");
+               dev_warn(ddata->dev, "wdt disable step1 failed\n");
 
-       sysc_write(ddata, wps, 0x5555);
+       sysc_write(ddata, spr, 0x5555);
        error = readl_poll_timeout(ddata->module_va + wps, val,
                                   !(val & 0x10), 100,
                                   MAX_MODULE_SOFTRESET_WAIT);
        if (error)
-               dev_warn(ddata->dev, "wdt disable wps failed\n");
+               dev_warn(ddata->dev, "wdt disable step2 failed\n");
 }
 
 static void sysc_init_module_quirks(struct sysc *ddata)
@@ -1471,8 +1479,10 @@ static void sysc_init_module_quirks(struct sysc *ddata)
        if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_SGX)
                ddata->module_enable_quirk = sysc_module_enable_quirk_sgx;
 
-       if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_WDT)
+       if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_WDT) {
                ddata->reset_done_quirk = sysc_reset_done_quirk_wdt;
+               ddata->module_disable_quirk = sysc_reset_done_quirk_wdt;
+       }
 }
 
 static int sysc_clockdomain_init(struct sysc *ddata)
index c2f7de9dc54330b4df0064d7c5227f0af7686b56..de434feb873af853823bad6bae3db74b542bd71a 100644 (file)
@@ -2520,4 +2520,4 @@ void add_bootloader_randomness(const void *buf, unsigned int size)
        else
                add_device_randomness(buf, size);
 }
-EXPORT_SYMBOL_GPL(add_bootloader_randomness);
\ No newline at end of file
+EXPORT_SYMBOL_GPL(add_bootloader_randomness);
index b57fe09b428beb267eeb06b60cdff01491905d93..9dd6185a4b4e2f914ab0b0b7969f43401cbef9f9 100644 (file)
@@ -683,7 +683,7 @@ static const struct omap_clkctrl_reg_data dra7_l4per2_clkctrl_regs[] __initconst
        { DRA7_L4PER2_MCASP2_CLKCTRL, dra7_mcasp2_bit_data, CLKF_SW_SUP, "l4per2-clkctrl:0154:22" },
        { DRA7_L4PER2_MCASP3_CLKCTRL, dra7_mcasp3_bit_data, CLKF_SW_SUP, "l4per2-clkctrl:015c:22" },
        { DRA7_L4PER2_MCASP5_CLKCTRL, dra7_mcasp5_bit_data, CLKF_SW_SUP, "l4per2-clkctrl:016c:22" },
-       { DRA7_L4PER2_MCASP8_CLKCTRL, dra7_mcasp8_bit_data, CLKF_SW_SUP, "l4per2-clkctrl:0184:24" },
+       { DRA7_L4PER2_MCASP8_CLKCTRL, dra7_mcasp8_bit_data, CLKF_SW_SUP, "l4per2-clkctrl:0184:22" },
        { DRA7_L4PER2_MCASP4_CLKCTRL, dra7_mcasp4_bit_data, CLKF_SW_SUP, "l4per2-clkctrl:018c:22" },
        { DRA7_L4PER2_UART7_CLKCTRL, dra7_uart7_bit_data, CLKF_SW_SUP, "l4per2-clkctrl:01c4:24" },
        { DRA7_L4PER2_UART8_CLKCTRL, dra7_uart8_bit_data, CLKF_SW_SUP, "l4per2-clkctrl:01d4:24" },
@@ -828,8 +828,8 @@ static struct ti_dt_clk dra7xx_clks[] = {
        DT_CLK(NULL, "mcasp6_aux_gfclk_mux", "l4per2-clkctrl:01f8:22"),
        DT_CLK(NULL, "mcasp7_ahclkx_mux", "l4per2-clkctrl:01fc:24"),
        DT_CLK(NULL, "mcasp7_aux_gfclk_mux", "l4per2-clkctrl:01fc:22"),
-       DT_CLK(NULL, "mcasp8_ahclkx_mux", "l4per2-clkctrl:0184:22"),
-       DT_CLK(NULL, "mcasp8_aux_gfclk_mux", "l4per2-clkctrl:0184:24"),
+       DT_CLK(NULL, "mcasp8_ahclkx_mux", "l4per2-clkctrl:0184:24"),
+       DT_CLK(NULL, "mcasp8_aux_gfclk_mux", "l4per2-clkctrl:0184:22"),
        DT_CLK(NULL, "mmc1_clk32k", "l3init-clkctrl:0008:8"),
        DT_CLK(NULL, "mmc1_fclk_div", "l3init-clkctrl:0008:25"),
        DT_CLK(NULL, "mmc1_fclk_mux", "l3init-clkctrl:0008:24"),
index d8c2bd4391d027acf9f4609153a39514e7043f3a..11ff701ff4bb99076a4e19b9faa10e1bd8229d7b 100644 (file)
@@ -25,7 +25,9 @@ static __init void timer_of_irq_exit(struct of_timer_irq *of_irq)
 
        struct clock_event_device *clkevt = &to->clkevt;
 
-       of_irq->percpu ? free_percpu_irq(of_irq->irq, clkevt) :
+       if (of_irq->percpu)
+               free_percpu_irq(of_irq->irq, clkevt);
+       else
                free_irq(of_irq->irq, clkevt);
 }
 
index c52d6fa32aac8125abfeadbac8ffe5d7e9cb5ff2..48a224a6b17815ae789ee8bede83508cc8f077d9 100644 (file)
@@ -720,7 +720,7 @@ static ssize_t store_##file_name                                    \
        if (ret != 1)                                                   \
                return -EINVAL;                                         \
                                                                        \
-       ret = dev_pm_qos_update_request(policy->object##_freq_req, val);\
+       ret = freq_qos_update_request(policy->object##_freq_req, val);\
        return ret >= 0 ? count : ret;                                  \
 }
 
@@ -1202,19 +1202,21 @@ static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
                goto err_free_real_cpus;
        }
 
+       freq_constraints_init(&policy->constraints);
+
        policy->nb_min.notifier_call = cpufreq_notifier_min;
        policy->nb_max.notifier_call = cpufreq_notifier_max;
 
-       ret = dev_pm_qos_add_notifier(dev, &policy->nb_min,
-                                     DEV_PM_QOS_MIN_FREQUENCY);
+       ret = freq_qos_add_notifier(&policy->constraints, FREQ_QOS_MIN,
+                                   &policy->nb_min);
        if (ret) {
                dev_err(dev, "Failed to register MIN QoS notifier: %d (%*pbl)\n",
                        ret, cpumask_pr_args(policy->cpus));
                goto err_kobj_remove;
        }
 
-       ret = dev_pm_qos_add_notifier(dev, &policy->nb_max,
-                                     DEV_PM_QOS_MAX_FREQUENCY);
+       ret = freq_qos_add_notifier(&policy->constraints, FREQ_QOS_MAX,
+                                   &policy->nb_max);
        if (ret) {
                dev_err(dev, "Failed to register MAX QoS notifier: %d (%*pbl)\n",
                        ret, cpumask_pr_args(policy->cpus));
@@ -1232,8 +1234,8 @@ static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
        return policy;
 
 err_min_qos_notifier:
-       dev_pm_qos_remove_notifier(dev, &policy->nb_min,
-                                  DEV_PM_QOS_MIN_FREQUENCY);
+       freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MIN,
+                                &policy->nb_min);
 err_kobj_remove:
        cpufreq_policy_put_kobj(policy);
 err_free_real_cpus:
@@ -1250,7 +1252,6 @@ err_free_policy:
 
 static void cpufreq_policy_free(struct cpufreq_policy *policy)
 {
-       struct device *dev = get_cpu_device(policy->cpu);
        unsigned long flags;
        int cpu;
 
@@ -1262,10 +1263,13 @@ static void cpufreq_policy_free(struct cpufreq_policy *policy)
                per_cpu(cpufreq_cpu_data, cpu) = NULL;
        write_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
-       dev_pm_qos_remove_notifier(dev, &policy->nb_max,
-                                  DEV_PM_QOS_MAX_FREQUENCY);
-       dev_pm_qos_remove_notifier(dev, &policy->nb_min,
-                                  DEV_PM_QOS_MIN_FREQUENCY);
+       freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MAX,
+                                &policy->nb_max);
+       freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MIN,
+                                &policy->nb_min);
+
+       /* Cancel any pending policy->update work before freeing the policy. */
+       cancel_work_sync(&policy->update);
 
        if (policy->max_freq_req) {
                /*
@@ -1274,10 +1278,10 @@ static void cpufreq_policy_free(struct cpufreq_policy *policy)
                 */
                blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
                                             CPUFREQ_REMOVE_POLICY, policy);
-               dev_pm_qos_remove_request(policy->max_freq_req);
+               freq_qos_remove_request(policy->max_freq_req);
        }
 
-       dev_pm_qos_remove_request(policy->min_freq_req);
+       freq_qos_remove_request(policy->min_freq_req);
        kfree(policy->min_freq_req);
 
        cpufreq_policy_put_kobj(policy);
@@ -1357,8 +1361,6 @@ static int cpufreq_online(unsigned int cpu)
        cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
 
        if (new_policy) {
-               struct device *dev = get_cpu_device(cpu);
-
                for_each_cpu(j, policy->related_cpus) {
                        per_cpu(cpufreq_cpu_data, j) = policy;
                        add_cpu_dev_symlink(policy, j);
@@ -1369,36 +1371,31 @@ static int cpufreq_online(unsigned int cpu)
                if (!policy->min_freq_req)
                        goto out_destroy_policy;
 
-               ret = dev_pm_qos_add_request(dev, policy->min_freq_req,
-                                            DEV_PM_QOS_MIN_FREQUENCY,
-                                            policy->min);
+               ret = freq_qos_add_request(&policy->constraints,
+                                          policy->min_freq_req, FREQ_QOS_MIN,
+                                          policy->min);
                if (ret < 0) {
                        /*
-                        * So we don't call dev_pm_qos_remove_request() for an
+                        * So we don't call freq_qos_remove_request() for an
                         * uninitialized request.
                         */
                        kfree(policy->min_freq_req);
                        policy->min_freq_req = NULL;
-
-                       dev_err(dev, "Failed to add min-freq constraint (%d)\n",
-                               ret);
                        goto out_destroy_policy;
                }
 
                /*
                 * This must be initialized right here to avoid calling
-                * dev_pm_qos_remove_request() on uninitialized request in case
+                * freq_qos_remove_request() on uninitialized request in case
                 * of errors.
                 */
                policy->max_freq_req = policy->min_freq_req + 1;
 
-               ret = dev_pm_qos_add_request(dev, policy->max_freq_req,
-                                            DEV_PM_QOS_MAX_FREQUENCY,
-                                            policy->max);
+               ret = freq_qos_add_request(&policy->constraints,
+                                          policy->max_freq_req, FREQ_QOS_MAX,
+                                          policy->max);
                if (ret < 0) {
                        policy->max_freq_req = NULL;
-                       dev_err(dev, "Failed to add max-freq constraint (%d)\n",
-                               ret);
                        goto out_destroy_policy;
                }
 
@@ -2374,7 +2371,6 @@ int cpufreq_set_policy(struct cpufreq_policy *policy,
                       struct cpufreq_policy *new_policy)
 {
        struct cpufreq_governor *old_gov;
-       struct device *cpu_dev = get_cpu_device(policy->cpu);
        int ret;
 
        pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
@@ -2386,8 +2382,8 @@ int cpufreq_set_policy(struct cpufreq_policy *policy,
         * PM QoS framework collects all the requests from users and provide us
         * the final aggregated value here.
         */
-       new_policy->min = dev_pm_qos_read_value(cpu_dev, DEV_PM_QOS_MIN_FREQUENCY);
-       new_policy->max = dev_pm_qos_read_value(cpu_dev, DEV_PM_QOS_MAX_FREQUENCY);
+       new_policy->min = freq_qos_read_value(&policy->constraints, FREQ_QOS_MIN);
+       new_policy->max = freq_qos_read_value(&policy->constraints, FREQ_QOS_MAX);
 
        /* verify the cpu speed can be set within this limit */
        ret = cpufreq_driver->verify(new_policy);
@@ -2518,7 +2514,7 @@ static int cpufreq_boost_set_sw(int state)
                        break;
                }
 
-               ret = dev_pm_qos_update_request(policy->max_freq_req, policy->max);
+               ret = freq_qos_update_request(policy->max_freq_req, policy->max);
                if (ret < 0)
                        break;
        }
@@ -2737,14 +2733,6 @@ int cpufreq_unregister_driver(struct cpufreq_driver *driver)
 }
 EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
 
-/*
- * Stop cpufreq at shutdown to make sure it isn't holding any locks
- * or mutexes when secondary CPUs are halted.
- */
-static struct syscore_ops cpufreq_syscore_ops = {
-       .shutdown = cpufreq_suspend,
-};
-
 struct kobject *cpufreq_global_kobject;
 EXPORT_SYMBOL(cpufreq_global_kobject);
 
@@ -2756,8 +2744,6 @@ static int __init cpufreq_core_init(void)
        cpufreq_global_kobject = kobject_create_and_add("cpufreq", &cpu_subsys.dev_root->kobj);
        BUG_ON(!cpufreq_global_kobject);
 
-       register_syscore_ops(&cpufreq_syscore_ops);
-
        return 0;
 }
 module_param(off, int, 0444);
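
The cpufreq conversion above replaces per-device PM QoS frequency constraints with per-policy freq_qos requests: all min-frequency requests on a policy are aggregated by taking their maximum, all max-frequency requests by taking their minimum, and the result feeds policy->min and policy->max. A toy user-space model of that aggregation, using hypothetical structures rather than the kernel's freq_constraints:

    #include <limits.h>
    #include <stdio.h>

    #define MAX_REQUESTS 8

    struct toy_constraints {
        int min_reqs[MAX_REQUESTS], nr_min;
        int max_reqs[MAX_REQUESTS], nr_max;
    };

    /* Effective minimum: the largest of all FREQ_QOS_MIN-style requests (0 if none). */
    static int effective_min(const struct toy_constraints *c)
    {
        int i, v = 0;

        for (i = 0; i < c->nr_min; i++)
            if (c->min_reqs[i] > v)
                v = c->min_reqs[i];
        return v;
    }

    /* Effective maximum: the smallest of all FREQ_QOS_MAX-style requests (INT_MAX if none). */
    static int effective_max(const struct toy_constraints *c)
    {
        int i, v = INT_MAX;

        for (i = 0; i < c->nr_max; i++)
            if (c->max_reqs[i] < v)
                v = c->max_reqs[i];
        return v;
    }

    int main(void)
    {
        struct toy_constraints c = {
            .min_reqs = { 800000, 1200000 }, .nr_min = 2,   /* kHz */
            .max_reqs = { 3600000, 2400000 }, .nr_max = 2,
        };

        printf("policy min=%d kHz max=%d kHz\n", effective_min(&c), effective_max(&c));
        return 0;
    }
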
index 9f02de9a1b4743ea7d01c2a3ff9f85e0cb81a997..53a51c169451f7f71f1f99652a42cb5532aa5963 100644 (file)
@@ -1088,10 +1088,10 @@ static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b,
 
 static struct cpufreq_driver intel_pstate;
 
-static void update_qos_request(enum dev_pm_qos_req_type type)
+static void update_qos_request(enum freq_qos_req_type type)
 {
        int max_state, turbo_max, freq, i, perf_pct;
-       struct dev_pm_qos_request *req;
+       struct freq_qos_request *req;
        struct cpufreq_policy *policy;
 
        for_each_possible_cpu(i) {
@@ -1112,7 +1112,7 @@ static void update_qos_request(enum dev_pm_qos_req_type type)
                else
                        turbo_max = cpu->pstate.turbo_pstate;
 
-               if (type == DEV_PM_QOS_MIN_FREQUENCY) {
+               if (type == FREQ_QOS_MIN) {
                        perf_pct = global.min_perf_pct;
                } else {
                        req++;
@@ -1122,7 +1122,7 @@ static void update_qos_request(enum dev_pm_qos_req_type type)
                freq = DIV_ROUND_UP(turbo_max * perf_pct, 100);
                freq *= cpu->pstate.scaling;
 
-               if (dev_pm_qos_update_request(req, freq) < 0)
+               if (freq_qos_update_request(req, freq) < 0)
                        pr_warn("Failed to update freq constraint: CPU%d\n", i);
        }
 }
@@ -1153,7 +1153,7 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct kobj_attribute *b,
        if (intel_pstate_driver == &intel_pstate)
                intel_pstate_update_policies();
        else
-               update_qos_request(DEV_PM_QOS_MAX_FREQUENCY);
+               update_qos_request(FREQ_QOS_MAX);
 
        mutex_unlock(&intel_pstate_driver_lock);
 
@@ -1187,7 +1187,7 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct kobj_attribute *b,
        if (intel_pstate_driver == &intel_pstate)
                intel_pstate_update_policies();
        else
-               update_qos_request(DEV_PM_QOS_MIN_FREQUENCY);
+               update_qos_request(FREQ_QOS_MIN);
 
        mutex_unlock(&intel_pstate_driver_lock);
 
@@ -2381,7 +2381,7 @@ static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy,
 static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy)
 {
        int max_state, turbo_max, min_freq, max_freq, ret;
-       struct dev_pm_qos_request *req;
+       struct freq_qos_request *req;
        struct cpudata *cpu;
        struct device *dev;
 
@@ -2416,15 +2416,15 @@ static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy)
        max_freq = DIV_ROUND_UP(turbo_max * global.max_perf_pct, 100);
        max_freq *= cpu->pstate.scaling;
 
-       ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_MIN_FREQUENCY,
-                                    min_freq);
+       ret = freq_qos_add_request(&policy->constraints, req, FREQ_QOS_MIN,
+                                  min_freq);
        if (ret < 0) {
                dev_err(dev, "Failed to add min-freq constraint (%d)\n", ret);
                goto free_req;
        }
 
-       ret = dev_pm_qos_add_request(dev, req + 1, DEV_PM_QOS_MAX_FREQUENCY,
-                                    max_freq);
+       ret = freq_qos_add_request(&policy->constraints, req + 1, FREQ_QOS_MAX,
+                                  max_freq);
        if (ret < 0) {
                dev_err(dev, "Failed to add max-freq constraint (%d)\n", ret);
                goto remove_min_req;
@@ -2435,7 +2435,7 @@ static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy)
        return 0;
 
 remove_min_req:
-       dev_pm_qos_remove_request(req);
+       freq_qos_remove_request(req);
 free_req:
        kfree(req);
 pstate_exit:
@@ -2446,12 +2446,12 @@ pstate_exit:
 
 static int intel_cpufreq_cpu_exit(struct cpufreq_policy *policy)
 {
-       struct dev_pm_qos_request *req;
+       struct freq_qos_request *req;
 
        req = policy->driver_data;
 
-       dev_pm_qos_remove_request(req + 1);
-       dev_pm_qos_remove_request(req);
+       freq_qos_remove_request(req + 1);
+       freq_qos_remove_request(req);
        kfree(req);
 
        return intel_pstate_cpu_exit(policy);
index bc9dd30395c402893d2ca71a5b78a96888c3526e..037fe23bc6ed0feeb76384126706a24cab82b863 100644 (file)
@@ -65,7 +65,7 @@ EXPORT_SYMBOL_GPL(cbe_cpufreq_set_pmode_pmi);
 static void cbe_cpufreq_handle_pmi(pmi_message_t pmi_msg)
 {
        struct cpufreq_policy *policy;
-       struct dev_pm_qos_request *req;
+       struct freq_qos_request *req;
        u8 node, slow_mode;
        int cpu, ret;
 
@@ -86,7 +86,7 @@ static void cbe_cpufreq_handle_pmi(pmi_message_t pmi_msg)
 
        req = policy->driver_data;
 
-       ret = dev_pm_qos_update_request(req,
+       ret = freq_qos_update_request(req,
                        policy->freq_table[slow_mode].frequency);
        if (ret < 0)
                pr_warn("Failed to update freq constraint: %d\n", ret);
@@ -103,7 +103,7 @@ static struct pmi_handler cbe_pmi_handler = {
 
 void cbe_cpufreq_pmi_policy_init(struct cpufreq_policy *policy)
 {
-       struct dev_pm_qos_request *req;
+       struct freq_qos_request *req;
        int ret;
 
        if (!cbe_cpufreq_has_pmi)
@@ -113,9 +113,8 @@ void cbe_cpufreq_pmi_policy_init(struct cpufreq_policy *policy)
        if (!req)
                return;
 
-       ret = dev_pm_qos_add_request(get_cpu_device(policy->cpu), req,
-                                    DEV_PM_QOS_MAX_FREQUENCY,
-                                    policy->freq_table[0].frequency);
+       ret = freq_qos_add_request(&policy->constraints, req, FREQ_QOS_MAX,
+                                  policy->freq_table[0].frequency);
        if (ret < 0) {
                pr_err("Failed to add freq constraint (%d)\n", ret);
                kfree(req);
@@ -128,10 +127,10 @@ EXPORT_SYMBOL_GPL(cbe_cpufreq_pmi_policy_init);
 
 void cbe_cpufreq_pmi_policy_exit(struct cpufreq_policy *policy)
 {
-       struct dev_pm_qos_request *req = policy->driver_data;
+       struct freq_qos_request *req = policy->driver_data;
 
        if (cbe_cpufreq_has_pmi) {
-               dev_pm_qos_remove_request(req);
+               freq_qos_remove_request(req);
                kfree(req);
        }
 }
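
The two conversions above replace per-device PM QoS requests with freq_qos requests attached to policy->constraints. Below is a minimal sketch of that lifecycle for a hypothetical driver keeping a single FREQ_QOS_MAX request per policy; the storage and function names are invented, only the API calls mirror the hunks above.

/*
 * Minimal sketch only: hypothetical storage and helpers, API usage as in
 * the hunks above.
 */
static struct freq_qos_request *example_req;

static int example_policy_init(struct cpufreq_policy *policy)
{
        int ret;

        example_req = kzalloc(sizeof(*example_req), GFP_KERNEL);
        if (!example_req)
                return -ENOMEM;

        /* Cap the policy at its first table entry, as the PMI handler does. */
        ret = freq_qos_add_request(&policy->constraints, example_req,
                                   FREQ_QOS_MAX,
                                   policy->freq_table[0].frequency);
        if (ret < 0) {
                kfree(example_req);
                example_req = NULL;
                return ret;
        }

        return 0;
}

static void example_policy_exit(void)
{
        freq_qos_remove_request(example_req);
        kfree(example_req);
}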
index 932390b028f1df148d8f96bd6a5b1b6b36fcec72..b0ce9bc78113e6bde532101ffe0beba9c9baf860 100644 (file)
@@ -95,6 +95,10 @@ static int __init haltpoll_init(void)
        int ret;
        struct cpuidle_driver *drv = &haltpoll_driver;
 
+       /* Do not load haltpoll if idle= is passed */
+       if (boot_option_idle_override != IDLE_NO_OVERRIDE)
+               return -ENODEV;
+
        cpuidle_poll_state_init(drv);
 
        if (!kvm_para_available() ||
index 774d991d7cca49011016d41c00914ad84059ccb8..aca75237bbcf83eb1d440bcdf1e4d3b702cff0a1 100644 (file)
@@ -1297,7 +1297,7 @@ static void make_established(struct sock *sk, u32 snd_isn, unsigned int opt)
        tp->write_seq = snd_isn;
        tp->snd_nxt = snd_isn;
        tp->snd_una = snd_isn;
-       inet_sk(sk)->inet_id = tp->write_seq ^ jiffies;
+       inet_sk(sk)->inet_id = prandom_u32();
        assign_rxopt(sk, opt);
 
        if (tp->rcv_wnd > (RCV_BUFSIZ_M << 10))
index 0891ab829b1b6b1318353e945e6394fab29f7c1e..98bc5a4cd5e7014990f064a92777308ae98b13e4 100644 (file)
@@ -1702,7 +1702,7 @@ int chtls_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
                return peekmsg(sk, msg, len, nonblock, flags);
 
        if (sk_can_busy_loop(sk) &&
-           skb_queue_empty(&sk->sk_receive_queue) &&
+           skb_queue_empty_lockless(&sk->sk_receive_queue) &&
            sk->sk_state == TCP_ESTABLISHED)
                sk_busy_loop(sk, nonblock);
 
index 42a8f3f1168133424091f35e011f58c2e757b890..709002515550cd2e6ff989a4c2bdd11b5b0cdf9c 100644 (file)
@@ -471,7 +471,7 @@ unlock:
        if (pfence_excl)
                *pfence_excl = fence_excl;
        else if (fence_excl)
-               shared[++shared_count] = fence_excl;
+               shared[shared_count++] = fence_excl;
 
        if (!shared_count) {
                kfree(shared);
index 9ba74ab7e912e90fe78ab9f02b4120d79ab14fb4..c27e206a764c3599ef8e548c4d9491399ce70c13 100644 (file)
@@ -1707,6 +1707,14 @@ static void sdma_add_scripts(struct sdma_engine *sdma,
        if (!sdma->script_number)
                sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1;
 
+       if (sdma->script_number > sizeof(struct sdma_script_start_addrs)
+                                 / sizeof(s32)) {
+               dev_err(sdma->dev,
+                       "SDMA script number %d not match with firmware.\n",
+                       sdma->script_number);
+               return;
+       }
+
        for (i = 0; i < sdma->script_number; i++)
                if (addr_arr[i] > 0)
                        saddr_arr[i] = addr_arr[i];
index 8e90a405939df4989d69d001efc803515ca1515a..ef73f65224b1b7b3d419b7543478cfbb63da6c07 100644 (file)
@@ -694,6 +694,25 @@ static int bam_dma_terminate_all(struct dma_chan *chan)
 
        /* remove all transactions, including active transaction */
        spin_lock_irqsave(&bchan->vc.lock, flag);
+       /*
+        * If we have transactions queued, then some might be committed to the
+        * hardware in the desc fifo.  The only way to reset the desc fifo is
+        * to do a hardware reset (either by pipe or the entire block).
+        * bam_chan_init_hw() will trigger a pipe reset, and also reinit the
+        * pipe.  If the pipe is left disabled (default state after pipe reset)
+        * and is accessed by a connected hardware engine, a fatal error in
+        * the BAM will occur.  There is a small window where this could happen
+        * with bam_chan_init_hw(), but it is assumed that the caller has
+        * stopped activity on any attached hardware engine.  Make sure to do
+        * this first so that the BAM hardware doesn't cause memory corruption
+        * by accessing freed resources.
+        */
+       if (!list_empty(&bchan->desc_list)) {
+               async_desc = list_first_entry(&bchan->desc_list,
+                                             struct bam_async_desc, desc_node);
+               bam_chan_init_hw(bchan, async_desc->dir);
+       }
+
        list_for_each_entry_safe(async_desc, tmp,
                                 &bchan->desc_list, desc_node) {
                list_add(&async_desc->vd.node, &bchan->vc.desc_issued);
index 525dc7338fe3b86842f4d2aa3ffd9857d34ec5f6..8546ad03472083d76156995158e860e106be028b 100644 (file)
 #define SPRD_DMA_SRC_TRSF_STEP_OFFSET  0
 #define SPRD_DMA_TRSF_STEP_MASK                GENMASK(15, 0)
 
+/* SPRD DMA_SRC_BLK_STEP register definition */
+#define SPRD_DMA_LLIST_HIGH_MASK       GENMASK(31, 28)
+#define SPRD_DMA_LLIST_HIGH_SHIFT      28
+
 /* define DMA channel mode & trigger mode mask */
 #define SPRD_DMA_CHN_MODE_MASK         GENMASK(7, 0)
 #define SPRD_DMA_TRG_MODE_MASK         GENMASK(7, 0)
@@ -208,6 +212,7 @@ struct sprd_dma_dev {
        struct sprd_dma_chn     channels[0];
 };
 
+static void sprd_dma_free_desc(struct virt_dma_desc *vd);
 static bool sprd_dma_filter_fn(struct dma_chan *chan, void *param);
 static struct of_dma_filter_info sprd_dma_info = {
        .filter_fn = sprd_dma_filter_fn,
@@ -609,12 +614,19 @@ static int sprd_dma_alloc_chan_resources(struct dma_chan *chan)
 static void sprd_dma_free_chan_resources(struct dma_chan *chan)
 {
        struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
+       struct virt_dma_desc *cur_vd = NULL;
        unsigned long flags;
 
        spin_lock_irqsave(&schan->vc.lock, flags);
+       if (schan->cur_desc)
+               cur_vd = &schan->cur_desc->vd;
+
        sprd_dma_stop(schan);
        spin_unlock_irqrestore(&schan->vc.lock, flags);
 
+       if (cur_vd)
+               sprd_dma_free_desc(cur_vd);
+
        vchan_free_chan_resources(&schan->vc);
        pm_runtime_put(chan->device->dev);
 }
@@ -717,6 +729,7 @@ static int sprd_dma_fill_desc(struct dma_chan *chan,
        u32 int_mode = flags & SPRD_DMA_INT_MASK;
        int src_datawidth, dst_datawidth, src_step, dst_step;
        u32 temp, fix_mode = 0, fix_en = 0;
+       phys_addr_t llist_ptr;
 
        if (dir == DMA_MEM_TO_DEV) {
                src_step = sprd_dma_get_step(slave_cfg->src_addr_width);
@@ -814,13 +827,16 @@ static int sprd_dma_fill_desc(struct dma_chan *chan,
                 * Set the link-list pointer point to next link-list
                 * configuration's physical address.
                 */
-               hw->llist_ptr = schan->linklist.phy_addr + temp;
+               llist_ptr = schan->linklist.phy_addr + temp;
+               hw->llist_ptr = lower_32_bits(llist_ptr);
+               hw->src_blk_step = (upper_32_bits(llist_ptr) << SPRD_DMA_LLIST_HIGH_SHIFT) &
+                       SPRD_DMA_LLIST_HIGH_MASK;
        } else {
                hw->llist_ptr = 0;
+               hw->src_blk_step = 0;
        }
 
        hw->frg_step = 0;
-       hw->src_blk_step = 0;
        hw->des_blk_step = 0;
        return 0;
 }
@@ -1023,15 +1039,22 @@ static int sprd_dma_resume(struct dma_chan *chan)
 static int sprd_dma_terminate_all(struct dma_chan *chan)
 {
        struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
+       struct virt_dma_desc *cur_vd = NULL;
        unsigned long flags;
        LIST_HEAD(head);
 
        spin_lock_irqsave(&schan->vc.lock, flags);
+       if (schan->cur_desc)
+               cur_vd = &schan->cur_desc->vd;
+
        sprd_dma_stop(schan);
 
        vchan_get_all_descriptors(&schan->vc, &head);
        spin_unlock_irqrestore(&schan->vc.lock, flags);
 
+       if (cur_vd)
+               sprd_dma_free_desc(cur_vd);
+
        vchan_dma_desc_free_list(&schan->vc, &head);
        return 0;
 }
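
The sprd_dma_fill_desc() hunk above starts carrying bits 32..35 of the link-list physical address in SRC_BLK_STEP[31:28] instead of truncating it to 32 bits. A small illustrative helper follows; the helper name is invented and the hardware struct name is assumed from context, while the field macros come from the patch.

/* Illustrative only: same split as performed in sprd_dma_fill_desc(). */
static void example_set_llist_ptr(struct sprd_dma_chn_hw *hw,
                                  phys_addr_t llist)
{
        hw->llist_ptr = lower_32_bits(llist);
        hw->src_blk_step = (upper_32_bits(llist) << SPRD_DMA_LLIST_HIGH_SHIFT) &
                           SPRD_DMA_LLIST_HIGH_MASK;
}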
index 5f8adf5c1f203f4c1905f728c752ed2dbbfe64b5..6e1268552f74070277e28ed1036df1b4349bca7b 100644 (file)
@@ -40,6 +40,7 @@
 #define ADMA_CH_CONFIG_MAX_BURST_SIZE                   16
 #define ADMA_CH_CONFIG_WEIGHT_FOR_WRR(val)             ((val) & 0xf)
 #define ADMA_CH_CONFIG_MAX_BUFS                                8
+#define TEGRA186_ADMA_CH_CONFIG_OUTSTANDING_REQS(reqs) (reqs << 4)
 
 #define ADMA_CH_FIFO_CTRL                              0x2c
 #define TEGRA210_ADMA_CH_FIFO_CTRL_TXSIZE(val)         (((val) & 0xf) << 8)
@@ -77,6 +78,7 @@ struct tegra_adma;
  * @ch_req_tx_shift: Register offset for AHUB transmit channel select.
  * @ch_req_rx_shift: Register offset for AHUB receive channel select.
  * @ch_base_offset: Register offset of DMA channel registers.
+ * @has_outstanding_reqs: If DMA channel can have outstanding requests.
  * @ch_fifo_ctrl: Default value for channel FIFO CTRL register.
  * @ch_req_mask: Mask for Tx or Rx channel select.
  * @ch_req_max: Maximum number of Tx or Rx channels available.
@@ -95,6 +97,7 @@ struct tegra_adma_chip_data {
        unsigned int ch_req_max;
        unsigned int ch_reg_size;
        unsigned int nr_channels;
+       bool has_outstanding_reqs;
 };
 
 /*
@@ -594,6 +597,8 @@ static int tegra_adma_set_xfer_params(struct tegra_adma_chan *tdc,
                         ADMA_CH_CTRL_FLOWCTRL_EN;
        ch_regs->config |= cdata->adma_get_burst_config(burst_size);
        ch_regs->config |= ADMA_CH_CONFIG_WEIGHT_FOR_WRR(1);
+       if (cdata->has_outstanding_reqs)
+               ch_regs->config |= TEGRA186_ADMA_CH_CONFIG_OUTSTANDING_REQS(8);
        ch_regs->fifo_ctrl = cdata->ch_fifo_ctrl;
        ch_regs->tc = desc->period_len & ADMA_CH_TC_COUNT_MASK;
 
@@ -778,6 +783,7 @@ static const struct tegra_adma_chip_data tegra210_chip_data = {
        .ch_req_tx_shift        = 28,
        .ch_req_rx_shift        = 24,
        .ch_base_offset         = 0,
+       .has_outstanding_reqs   = false,
        .ch_fifo_ctrl           = TEGRA210_FIFO_CTRL_DEFAULT,
        .ch_req_mask            = 0xf,
        .ch_req_max             = 10,
@@ -792,6 +798,7 @@ static const struct tegra_adma_chip_data tegra186_chip_data = {
        .ch_req_tx_shift        = 27,
        .ch_req_rx_shift        = 22,
        .ch_base_offset         = 0x10000,
+       .has_outstanding_reqs   = true,
        .ch_fifo_ctrl           = TEGRA186_FIFO_CTRL_DEFAULT,
        .ch_req_mask            = 0x1f,
        .ch_req_max             = 20,
index 2f946f55076c549847fbd0fb1044af3e6f8ed277..8c2f7ebe998c0ba1ec290fa6b6ac4083424daf88 100644 (file)
@@ -586,9 +586,22 @@ static struct dma_async_tx_descriptor *cppi41_dma_prep_slave_sg(
        enum dma_transfer_direction dir, unsigned long tx_flags, void *context)
 {
        struct cppi41_channel *c = to_cpp41_chan(chan);
+       struct dma_async_tx_descriptor *txd = NULL;
+       struct cppi41_dd *cdd = c->cdd;
        struct cppi41_desc *d;
        struct scatterlist *sg;
        unsigned int i;
+       int error;
+
+       error = pm_runtime_get(cdd->ddev.dev);
+       if (error < 0) {
+               pm_runtime_put_noidle(cdd->ddev.dev);
+
+               return NULL;
+       }
+
+       if (cdd->is_suspended)
+               goto err_out_not_ready;
 
        d = c->desc;
        for_each_sg(sgl, sg, sg_len, i) {
@@ -611,7 +624,13 @@ static struct dma_async_tx_descriptor *cppi41_dma_prep_slave_sg(
                d++;
        }
 
-       return &c->txd;
+       txd = &c->txd;
+
+err_out_not_ready:
+       pm_runtime_mark_last_busy(cdd->ddev.dev);
+       pm_runtime_put_autosuspend(cdd->ddev.dev);
+
+       return txd;
 }
 
 static void cppi41_compute_td_desc(struct cppi41_desc *d)
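
The cppi41_dma_prep_slave_sg() change above wraps descriptor setup in a runtime-PM reference so the device is awake (or the call bails out) while descriptors are written. A generic sketch of that get/put pattern; the device pointer and register work are placeholders.

/*
 * Generic sketch: take a reference before touching the hardware, drop it
 * with put_noidle() if the get itself failed, otherwise release it via
 * autosuspend.
 */
static int example_touch_hw(struct device *dev)
{
        int error;

        error = pm_runtime_get(dev);
        if (error < 0) {
                pm_runtime_put_noidle(dev);
                return error;
        }

        /* ... program descriptors / FIFO here ... */

        pm_runtime_mark_last_busy(dev);
        pm_runtime_put_autosuspend(dev);
        return 0;
}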
index e7dc3c4dc8e077d855410055539a2b88a16d9bfb..5d56f1e4d332ce5386b97efb0ec8063b1c659316 100644 (file)
@@ -68,6 +68,9 @@
 #define XILINX_DMA_DMACR_CIRC_EN               BIT(1)
 #define XILINX_DMA_DMACR_RUNSTOP               BIT(0)
 #define XILINX_DMA_DMACR_FSYNCSRC_MASK         GENMASK(6, 5)
+#define XILINX_DMA_DMACR_DELAY_MASK            GENMASK(31, 24)
+#define XILINX_DMA_DMACR_FRAME_COUNT_MASK      GENMASK(23, 16)
+#define XILINX_DMA_DMACR_MASTER_MASK           GENMASK(11, 8)
 
 #define XILINX_DMA_REG_DMASR                   0x0004
 #define XILINX_DMA_DMASR_EOL_LATE_ERR          BIT(15)
@@ -1354,7 +1357,8 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
                                           node);
                hw = &segment->hw;
 
-               xilinx_write(chan, XILINX_DMA_REG_SRCDSTADDR, hw->buf_addr);
+               xilinx_write(chan, XILINX_DMA_REG_SRCDSTADDR,
+                            xilinx_prep_dma_addr_t(hw->buf_addr));
 
                /* Start the transfer */
                dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
@@ -2117,8 +2121,10 @@ int xilinx_vdma_channel_set_config(struct dma_chan *dchan,
        chan->config.gen_lock = cfg->gen_lock;
        chan->config.master = cfg->master;
 
+       dmacr &= ~XILINX_DMA_DMACR_GENLOCK_EN;
        if (cfg->gen_lock && chan->genlock) {
                dmacr |= XILINX_DMA_DMACR_GENLOCK_EN;
+               dmacr &= ~XILINX_DMA_DMACR_MASTER_MASK;
                dmacr |= cfg->master << XILINX_DMA_DMACR_MASTER_SHIFT;
        }
 
@@ -2134,11 +2140,13 @@ int xilinx_vdma_channel_set_config(struct dma_chan *dchan,
        chan->config.delay = cfg->delay;
 
        if (cfg->coalesc <= XILINX_DMA_DMACR_FRAME_COUNT_MAX) {
+               dmacr &= ~XILINX_DMA_DMACR_FRAME_COUNT_MASK;
                dmacr |= cfg->coalesc << XILINX_DMA_DMACR_FRAME_COUNT_SHIFT;
                chan->config.coalesc = cfg->coalesc;
        }
 
        if (cfg->delay <= XILINX_DMA_DMACR_DELAY_MAX) {
+               dmacr &= ~XILINX_DMA_DMACR_DELAY_MASK;
                dmacr |= cfg->delay << XILINX_DMA_DMACR_DELAY_SHIFT;
                chan->config.delay = cfg->delay;
        }
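
The mask definitions added above let xilinx_vdma_channel_set_config() clear each field before OR-ing in the new value, so repeated reconfiguration cannot accumulate stale bits. A minimal read-modify-write sketch using the same frame-count field; the helper and the raw register pointer are hypothetical, the mask/shift macros are the driver's own.

/* Illustrative read-modify-write of one DMACR field. */
static void example_update_frame_count(void __iomem *reg, u32 coalesc)
{
        u32 dmacr = readl(reg);

        dmacr &= ~XILINX_DMA_DMACR_FRAME_COUNT_MASK;    /* drop stale bits */
        dmacr |= coalesc << XILINX_DMA_DMACR_FRAME_COUNT_SHIFT;
        writel(dmacr, reg);
}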
index d413a0bdc9ad9da8637b811e79a81023c39c1906..0bb62857ffb2414cb0b2da26c3591b5c7fb10cbe 100644 (file)
@@ -553,7 +553,11 @@ void ghes_edac_unregister(struct ghes *ghes)
        if (!ghes_pvt)
                return;
 
+       if (atomic_dec_return(&ghes_init))
+               return;
+
        mci = ghes_pvt->mci;
+       ghes_pvt = NULL;
        edac_mc_del_mc(mci->pdev);
        edac_mc_free(mci);
 }
index 35ed56b9c34f141a3cdd33c42853bdfc35f13931..1e21fc3e9851a4e6a322173ebffcc2ad1561c9c6 100644 (file)
@@ -408,7 +408,7 @@ static void __init save_mem_devices(const struct dmi_header *dm, void *v)
                bytes = ~0ull;
        else if (size & 0x8000)
                bytes = (u64)(size & 0x7fff) << 10;
-       else if (size != 0x7fff)
+       else if (size != 0x7fff || dm->length < 0x20)
                bytes = (u64)size << 20;
        else
                bytes = (u64)get_unaligned((u32 *)&d[0x1C]) << 20;
index 178ee8106828775f90a317bbb86a3b75d8dfb9ac..b248870a980611034e616ce403f19e5cca995c86 100644 (file)
@@ -182,6 +182,7 @@ config RESET_ATTACK_MITIGATION
 
 config EFI_RCI2_TABLE
        bool "EFI Runtime Configuration Interface Table Version 2 Support"
+       depends on X86 || COMPILE_TEST
        help
          Displays the content of the Runtime Configuration Interface
          Table version 2 on Dell EMC PowerEdge systems as a binary
index addf0749dd8b6ea6b3d6f423363fbdfddbaea17e..b1af0de2e10080ef8080183d0797436217520b28 100644 (file)
@@ -381,7 +381,7 @@ static void cper_print_pcie(const char *pfx, const struct cper_sec_pcie *pcie,
                printk("%s""vendor_id: 0x%04x, device_id: 0x%04x\n", pfx,
                       pcie->device_id.vendor_id, pcie->device_id.device_id);
                p = pcie->device_id.class_code;
-               printk("%s""class_code: %02x%02x%02x\n", pfx, p[0], p[1], p[2]);
+               printk("%s""class_code: %02x%02x%02x\n", pfx, p[2], p[1], p[0]);
        }
        if (pcie->validation_bits & CPER_PCIE_VALID_SERIAL_NUMBER)
                printk("%s""serial number: 0x%04x, 0x%04x\n", pfx,
index 8d3e778e988b3d139f04e8728c2a0adf1bfa072f..e98bbf8e56d9b07e7c609fb614e2105decd99175 100644 (file)
@@ -267,6 +267,9 @@ static __init int efivar_ssdt_load(void)
        void *data;
        int ret;
 
+       if (!efivar_ssdt[0])
+               return 0;
+
        ret = efivar_init(efivar_ssdt_iter, &entries, true, &entries);
 
        list_for_each_entry_safe(entry, aux, &entries, list) {
@@ -551,7 +554,7 @@ int __init efi_config_parse_tables(void *config_tables, int count, int sz,
                                              sizeof(*seed) + size);
                        if (seed != NULL) {
                                pr_notice("seeding entropy pool\n");
-                               add_device_randomness(seed->bits, seed->size);
+                               add_bootloader_randomness(seed->bits, seed->size);
                                early_memunmap(seed, sizeof(*seed) + size);
                        } else {
                                pr_err("Could not map UEFI random seed!\n");
index 0460c7581220e13cc8418488042d9a7bb17c0b3e..ee0661ddb25bb6a9e74f52e5c2cced5e85f632c7 100644 (file)
@@ -52,6 +52,7 @@ lib-$(CONFIG_EFI_ARMSTUB)     += arm-stub.o fdt.o string.o random.o \
 
 lib-$(CONFIG_ARM)              += arm32-stub.o
 lib-$(CONFIG_ARM64)            += arm64-stub.o
+CFLAGS_arm32-stub.o            := -DTEXT_OFFSET=$(TEXT_OFFSET)
 CFLAGS_arm64-stub.o            := -DTEXT_OFFSET=$(TEXT_OFFSET)
 
 #
index e8f7aefb6813d9c25b85ae367c01aa935a0fe8d1..41213bf5fcf5e8a84619923757fd033cce66761b 100644 (file)
@@ -195,6 +195,7 @@ efi_status_t handle_kernel_image(efi_system_table_t *sys_table,
                                 unsigned long dram_base,
                                 efi_loaded_image_t *image)
 {
+       unsigned long kernel_base;
        efi_status_t status;
 
        /*
@@ -204,9 +205,18 @@ efi_status_t handle_kernel_image(efi_system_table_t *sys_table,
         * loaded. These assumptions are made by the decompressor,
         * before any memory map is available.
         */
-       dram_base = round_up(dram_base, SZ_128M);
+       kernel_base = round_up(dram_base, SZ_128M);
 
-       status = reserve_kernel_base(sys_table, dram_base, reserve_addr,
+       /*
+        * Note that some platforms (notably, the Raspberry Pi 2) put
+        * spin-tables and other pieces of firmware at the base of RAM,
+        * abusing the fact that the window of TEXT_OFFSET bytes at the
+        * base of the kernel image is only partially used at the moment.
+        * (Up to 5 pages are used for the swapper page tables)
+        */
+       kernel_base += TEXT_OFFSET - 5 * PAGE_SIZE;
+
+       status = reserve_kernel_base(sys_table, kernel_base, reserve_addr,
                                     reserve_size);
        if (status != EFI_SUCCESS) {
                pr_efi_err(sys_table, "Unable to allocate memory for uncompressed kernel.\n");
@@ -220,7 +230,7 @@ efi_status_t handle_kernel_image(efi_system_table_t *sys_table,
        *image_size = image->image_size;
        status = efi_relocate_kernel(sys_table, image_addr, *image_size,
                                     *image_size,
-                                    dram_base + MAX_UNCOMP_KERNEL_SIZE, 0);
+                                    kernel_base + MAX_UNCOMP_KERNEL_SIZE, 0, 0);
        if (status != EFI_SUCCESS) {
                pr_efi_err(sys_table, "Failed to relocate kernel.\n");
                efi_free(sys_table, *reserve_size, *reserve_addr);
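
As a worked example of the placement logic above (purely illustrative values: dram_base = 0, TEXT_OFFSET = 0x8000 as conventional on 32-bit ARM, 4 KiB pages):

        kernel_base  = round_up(0x00000000, SZ_128M)  = 0x00000000
        kernel_base += 0x8000 - 5 * 0x1000            = 0x00003000

so the bottom 0x3000 bytes of RAM, where firmware such as the Raspberry Pi 2 spin-tables may sit, stay outside the reservation, while the five pages below TEXT_OFFSET that hold the swapper page tables are still covered.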
index 3caae7f2cf56772936b6f77484ef1f7e1a2b42c2..35dbc2791c973f6281470bce96016f1ce37e95a9 100644 (file)
@@ -260,11 +260,11 @@ fail:
 }
 
 /*
- * Allocate at the lowest possible address.
+ * Allocate at the lowest possible address that is not below 'min'.
  */
-efi_status_t efi_low_alloc(efi_system_table_t *sys_table_arg,
-                          unsigned long size, unsigned long align,
-                          unsigned long *addr)
+efi_status_t efi_low_alloc_above(efi_system_table_t *sys_table_arg,
+                                unsigned long size, unsigned long align,
+                                unsigned long *addr, unsigned long min)
 {
        unsigned long map_size, desc_size, buff_size;
        efi_memory_desc_t *map;
@@ -311,13 +311,8 @@ efi_status_t efi_low_alloc(efi_system_table_t *sys_table_arg,
                start = desc->phys_addr;
                end = start + desc->num_pages * EFI_PAGE_SIZE;
 
-               /*
-                * Don't allocate at 0x0. It will confuse code that
-                * checks pointers against NULL. Skip the first 8
-                * bytes so we start at a nice even number.
-                */
-               if (start == 0x0)
-                       start += 8;
+               if (start < min)
+                       start = min;
 
                start = round_up(start, align);
                if ((start + size) > end)
@@ -698,7 +693,8 @@ efi_status_t efi_relocate_kernel(efi_system_table_t *sys_table_arg,
                                 unsigned long image_size,
                                 unsigned long alloc_size,
                                 unsigned long preferred_addr,
-                                unsigned long alignment)
+                                unsigned long alignment,
+                                unsigned long min_addr)
 {
        unsigned long cur_image_addr;
        unsigned long new_addr = 0;
@@ -731,8 +727,8 @@ efi_status_t efi_relocate_kernel(efi_system_table_t *sys_table_arg,
         * possible.
         */
        if (status != EFI_SUCCESS) {
-               status = efi_low_alloc(sys_table_arg, alloc_size, alignment,
-                                      &new_addr);
+               status = efi_low_alloc_above(sys_table_arg, alloc_size,
+                                            alignment, &new_addr, min_addr);
        }
        if (status != EFI_SUCCESS) {
                pr_efi_err(sys_table_arg, "Failed to allocate usable memory for kernel.\n");
index 3e290f96620a408e1c0eda16e53d85a866208a87..76b0c354a0277ecd1cb3531fe6ca60ef3eed3eb0 100644 (file)
@@ -76,7 +76,7 @@ static u16 checksum(void)
        return chksum;
 }
 
-int __init efi_rci2_sysfs_init(void)
+static int __init efi_rci2_sysfs_init(void)
 {
        struct kobject *tables_kobj;
        int ret = -ENOMEM;
index 877745c3aaf2833fed62659aa38c52c0f3b2eb1c..7baf48c01e72f2f63e65e802609059da9cdb12ee 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/init.h>
 #include <linux/proc_fs.h>
 #include <linux/efi.h>
+#include <linux/security.h>
 #include <linux/slab.h>
 #include <linux/uaccess.h>
 
@@ -717,6 +718,13 @@ static long efi_test_ioctl(struct file *file, unsigned int cmd,
 
 static int efi_test_open(struct inode *inode, struct file *file)
 {
+       int ret = security_locked_down(LOCKDOWN_EFI_TEST);
+
+       if (ret)
+               return ret;
+
+       if (!capable(CAP_SYS_ADMIN))
+               return -EACCES;
        /*
         * nothing special to do here
         * We do accept multiple open files at the same time as we
index 1d3f5ca3eaaf8b6397b39e17a8676be3466f2e26..31f9f0e369b977345dee40523d2f615aec402e84 100644 (file)
@@ -40,7 +40,7 @@ int __init efi_tpm_eventlog_init(void)
 {
        struct linux_efi_tpm_eventlog *log_tbl;
        struct efi_tcg2_final_events_table *final_tbl;
-       unsigned int tbl_size;
+       int tbl_size;
        int ret = 0;
 
        if (efi.tpm_log == EFI_INVALID_TABLE_ADDR) {
@@ -75,16 +75,29 @@ int __init efi_tpm_eventlog_init(void)
                goto out;
        }
 
-       tbl_size = tpm2_calc_event_log_size((void *)efi.tpm_final_log
-                                           + sizeof(final_tbl->version)
-                                           + sizeof(final_tbl->nr_events),
-                                           final_tbl->nr_events,
-                                           log_tbl->log);
+       tbl_size = 0;
+       if (final_tbl->nr_events != 0) {
+               void *events = (void *)efi.tpm_final_log
+                               + sizeof(final_tbl->version)
+                               + sizeof(final_tbl->nr_events);
+
+               tbl_size = tpm2_calc_event_log_size(events,
+                                                   final_tbl->nr_events,
+                                                   log_tbl->log);
+       }
+
+       if (tbl_size < 0) {
+               pr_err(FW_BUG "Failed to parse event in TPM Final Events Log\n");
+               ret = -EINVAL;
+               goto out_calc;
+       }
+
        memblock_reserve((unsigned long)final_tbl,
                         tbl_size + sizeof(*final_tbl));
-       early_memunmap(final_tbl, sizeof(*final_tbl));
        efi_tpm_final_log_size = tbl_size;
 
+out_calc:
+       early_memunmap(final_tbl, sizeof(*final_tbl));
 out:
        early_memunmap(log_tbl, sizeof(*log_tbl));
        return ret;
index dda525c0f968271ad576381d103ad2c54d945a65..5c6f2a74f10497a5011cfb8fce0fa8c5a3e21494 100644 (file)
@@ -52,7 +52,7 @@ static int vpd_decode_entry(const u32 max_len, const u8 *input_buf,
        if (max_len - consumed < *entry_len)
                return VPD_FAIL;
 
-       consumed += decoded_len;
+       consumed += *entry_len;
        *_consumed = consumed;
        return VPD_OK;
 }
index fe7a73f52329b7b2b40d0cfcdf8df37623e992e3..bb287f35cf408f27dc245cf2d18790c56268f5f2 100644 (file)
@@ -530,11 +530,12 @@ static void sprd_eic_handle_one_type(struct gpio_chip *chip)
                }
 
                for_each_set_bit(n, &reg, SPRD_EIC_PER_BANK_NR) {
-                       girq = irq_find_mapping(chip->irq.domain,
-                                       bank * SPRD_EIC_PER_BANK_NR + n);
+                       u32 offset = bank * SPRD_EIC_PER_BANK_NR + n;
+
+                       girq = irq_find_mapping(chip->irq.domain, offset);
 
                        generic_handle_irq(girq);
-                       sprd_eic_toggle_trigger(chip, girq, n);
+                       sprd_eic_toggle_trigger(chip, girq, offset);
                }
        }
 }
index 4d835f9089df8b95892270b7756604202123e21e..86a10c808ef658c84a1218ccbd7b11e3712197fb 100644 (file)
@@ -293,8 +293,9 @@ static void intel_mid_irq_handler(struct irq_desc *desc)
        chip->irq_eoi(data);
 }
 
-static void intel_mid_irq_init_hw(struct intel_mid_gpio *priv)
+static int intel_mid_irq_init_hw(struct gpio_chip *chip)
 {
+       struct intel_mid_gpio *priv = gpiochip_get_data(chip);
        void __iomem *reg;
        unsigned base;
 
@@ -309,6 +310,8 @@ static void intel_mid_irq_init_hw(struct intel_mid_gpio *priv)
                reg = gpio_reg(&priv->chip, base, GEDR);
                writel(~0, reg);
        }
+
+       return 0;
 }
 
 static int __maybe_unused intel_gpio_runtime_idle(struct device *dev)
@@ -372,6 +375,7 @@ static int intel_gpio_probe(struct pci_dev *pdev,
 
        girq = &priv->chip.irq;
        girq->chip = &intel_mid_irqchip;
+       girq->init_hw = intel_mid_irq_init_hw;
        girq->parent_handler = intel_mid_irq_handler;
        girq->num_parents = 1;
        girq->parents = devm_kcalloc(&pdev->dev, girq->num_parents,
@@ -384,9 +388,8 @@ static int intel_gpio_probe(struct pci_dev *pdev,
        girq->default_type = IRQ_TYPE_NONE;
        girq->handler = handle_simple_irq;
 
-       intel_mid_irq_init_hw(priv);
-
        pci_set_drvdata(pdev, priv);
+
        retval = devm_gpiochip_add_data(&pdev->dev, &priv->chip, priv);
        if (retval) {
                dev_err(&pdev->dev, "gpiochip_add error %d\n", retval);
index 6bb9741ad036affb084a47937dfaa73a5f691b37..e9e47c0d5be75ce45671020028784f7a323f8910 100644 (file)
@@ -294,8 +294,9 @@ static struct irq_chip lp_irqchip = {
        .flags = IRQCHIP_SKIP_SET_WAKE,
 };
 
-static void lp_gpio_irq_init_hw(struct lp_gpio *lg)
+static int lp_gpio_irq_init_hw(struct gpio_chip *chip)
 {
+       struct lp_gpio *lg = gpiochip_get_data(chip);
        unsigned long reg;
        unsigned base;
 
@@ -307,6 +308,8 @@ static void lp_gpio_irq_init_hw(struct lp_gpio *lg)
                reg = lp_gpio_reg(&lg->chip, base, LP_INT_STAT);
                outl(0xffffffff, reg);
        }
+
+       return 0;
 }
 
 static int lp_gpio_probe(struct platform_device *pdev)
@@ -364,6 +367,7 @@ static int lp_gpio_probe(struct platform_device *pdev)
 
                girq = &gc->irq;
                girq->chip = &lp_irqchip;
+               girq->init_hw = lp_gpio_irq_init_hw;
                girq->parent_handler = lp_gpio_irq_handler;
                girq->num_parents = 1;
                girq->parents = devm_kcalloc(&pdev->dev, girq->num_parents,
@@ -373,9 +377,7 @@ static int lp_gpio_probe(struct platform_device *pdev)
                        return -ENOMEM;
                girq->parents[0] = (unsigned)irq_rc->start;
                girq->default_type = IRQ_TYPE_NONE;
-               girq->handler = handle_simple_irq;
-
-               lp_gpio_irq_init_hw(lg);
+               girq->handler = handle_bad_irq;
        }
 
        ret = devm_gpiochip_add_data(dev, gc, lg);
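
Both GPIO drivers above stop calling their interrupt-hardware init directly from probe() and instead register it as the gpio_irq_chip ->init_hw() hook, which the gpiochip core invokes from gpiochip_add_data_with_key() (see the core hunks further down). A hedged sketch of that pattern with invented names and register offsets:

/* Hypothetical driver private data; only what the sketch needs. */
struct example_gpio {
        struct gpio_chip chip;
        void __iomem *base;
};

static int example_gpio_irq_init_hw(struct gpio_chip *chip)
{
        struct example_gpio *priv = gpiochip_get_data(chip);

        /* Mask and ack everything so no stale interrupt fires later. */
        writel(~0, priv->base + 0x08);  /* hypothetical mask register */
        writel(~0, priv->base + 0x0c);  /* hypothetical status register */
        return 0;
}

static void example_setup_irqchip(struct example_gpio *priv)
{
        struct gpio_irq_chip *girq = &priv->chip.irq;

        /* gpiolib calls this back during gpiochip_add_data(). */
        girq->init_hw = example_gpio_irq_init_hw;
}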
index 47d05e357e61a05b742fd75d892fa3d3e5506af0..faf86ea9c51ab15dcddedae72a2ddb168b469cfe 100644 (file)
@@ -192,13 +192,13 @@ static int max77620_gpio_set_debounce(struct max77620_gpio *mgpio,
        case 0:
                val = MAX77620_CNFG_GPIO_DBNC_None;
                break;
-       case 1 ... 8:
+       case 1000 ... 8000:
                val = MAX77620_CNFG_GPIO_DBNC_8ms;
                break;
-       case 9 ... 16:
+       case 9000 ... 16000:
                val = MAX77620_CNFG_GPIO_DBNC_16ms;
                break;
-       case 17 ... 32:
+       case 17000 ... 32000:
                val = MAX77620_CNFG_GPIO_DBNC_32ms;
                break;
        default:
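
The case labels in max77620_gpio_set_debounce() change because debounce intervals reach the driver in microseconds, not milliseconds. Consumers express the request in microseconds as well; asking for the 8 ms hardware setting would look roughly like this (illustrative helper name):

/* Illustrative consumer call: 8 ms expressed in microseconds. */
static int example_request_8ms_debounce(struct gpio_desc *desc)
{
        return gpiod_set_debounce(desc, 8000);
}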
index 4f27ddfe1e2f07363cec6ab7d20d65f819be61f1..3302125e5265110722d131c7979a8a5d4da0e081 100644 (file)
@@ -397,7 +397,6 @@ static int mrfld_gpio_probe(struct pci_dev *pdev, const struct pci_device_id *id
 {
        const struct mrfld_gpio_pinrange *range;
        const char *pinctrl_dev_name;
-       struct gpio_irq_chip *girq;
        struct mrfld_gpio *priv;
        u32 gpio_base, irq_base;
        void __iomem *base;
@@ -445,21 +444,6 @@ static int mrfld_gpio_probe(struct pci_dev *pdev, const struct pci_device_id *id
 
        raw_spin_lock_init(&priv->lock);
 
-       girq = &priv->chip.irq;
-       girq->chip = &mrfld_irqchip;
-       girq->parent_handler = mrfld_irq_handler;
-       girq->num_parents = 1;
-       girq->parents = devm_kcalloc(&pdev->dev, girq->num_parents,
-                                    sizeof(*girq->parents),
-                                    GFP_KERNEL);
-       if (!girq->parents)
-               return -ENOMEM;
-       girq->parents[0] = pdev->irq;
-       girq->default_type = IRQ_TYPE_NONE;
-       girq->handler = handle_bad_irq;
-
-       mrfld_irq_init_hw(priv);
-
        pci_set_drvdata(pdev, priv);
        retval = devm_gpiochip_add_data(&pdev->dev, &priv->chip, priv);
        if (retval) {
@@ -481,6 +465,18 @@ static int mrfld_gpio_probe(struct pci_dev *pdev, const struct pci_device_id *id
                }
        }
 
+       retval = gpiochip_irqchip_add(&priv->chip, &mrfld_irqchip, irq_base,
+                                     handle_bad_irq, IRQ_TYPE_NONE);
+       if (retval) {
+               dev_err(&pdev->dev, "could not connect irqchip to gpiochip\n");
+               return retval;
+       }
+
+       mrfld_irq_init_hw(priv);
+
+       gpiochip_set_chained_irqchip(&priv->chip, &mrfld_irqchip, pdev->irq,
+                                    mrfld_irq_handler);
+
        return 0;
 }
 
index 1eea2c6c2e1d827453dd16dc4c7831f5bd7d600c..80ea49f570f4215cb3e1a7304a22973f54de0583 100644 (file)
@@ -317,7 +317,7 @@ struct gpio_desc *gpiod_get_from_of_node(struct device_node *node,
        transitory = flags & OF_GPIO_TRANSITORY;
 
        ret = gpiod_request(desc, label);
-       if (ret == -EBUSY && (flags & GPIOD_FLAGS_BIT_NONEXCLUSIVE))
+       if (ret == -EBUSY && (dflags & GPIOD_FLAGS_BIT_NONEXCLUSIVE))
                return desc;
        if (ret)
                return ERR_PTR(ret);
index bdbc1649eafa26ed18f0d9f3058399fce5345a2c..104ed299d5eaa41c439e61dbcde5eb70d620840c 100644 (file)
@@ -86,6 +86,7 @@ static int gpiochip_add_irqchip(struct gpio_chip *gpiochip,
                                struct lock_class_key *lock_key,
                                struct lock_class_key *request_key);
 static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip);
+static int gpiochip_irqchip_init_hw(struct gpio_chip *gpiochip);
 static int gpiochip_irqchip_init_valid_mask(struct gpio_chip *gpiochip);
 static void gpiochip_irqchip_free_valid_mask(struct gpio_chip *gpiochip);
 
@@ -1406,6 +1407,10 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
 
        machine_gpiochip_add(chip);
 
+       ret = gpiochip_irqchip_init_hw(chip);
+       if (ret)
+               goto err_remove_acpi_chip;
+
        ret = gpiochip_irqchip_init_valid_mask(chip);
        if (ret)
                goto err_remove_acpi_chip;
@@ -1622,6 +1627,16 @@ static struct gpio_chip *find_chip_by_name(const char *name)
  * The following is irqchip helper code for gpiochips.
  */
 
+static int gpiochip_irqchip_init_hw(struct gpio_chip *gc)
+{
+       struct gpio_irq_chip *girq = &gc->irq;
+
+       if (!girq->init_hw)
+               return 0;
+
+       return girq->init_hw(gc);
+}
+
 static int gpiochip_irqchip_init_valid_mask(struct gpio_chip *gc)
 {
        struct gpio_irq_chip *girq = &gc->irq;
@@ -2446,8 +2461,13 @@ static inline int gpiochip_add_irqchip(struct gpio_chip *gpiochip,
 {
        return 0;
 }
-
 static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip) {}
+
+static inline int gpiochip_irqchip_init_hw(struct gpio_chip *gpiochip)
+{
+       return 0;
+}
+
 static inline int gpiochip_irqchip_init_valid_mask(struct gpio_chip *gpiochip)
 {
        return 0;
@@ -3070,8 +3090,10 @@ int gpiod_direction_output(struct gpio_desc *desc, int value)
                if (!ret)
                        goto set_output_value;
                /* Emulate open drain by not actively driving the line high */
-               if (value)
-                       return gpiod_direction_input(desc);
+               if (value) {
+                       ret = gpiod_direction_input(desc);
+                       goto set_output_flag;
+               }
        }
        else if (test_bit(FLAG_OPEN_SOURCE, &desc->flags)) {
                ret = gpio_set_config(gc, gpio_chip_hwgpio(desc),
@@ -3079,8 +3101,10 @@ int gpiod_direction_output(struct gpio_desc *desc, int value)
                if (!ret)
                        goto set_output_value;
                /* Emulate open source by not actively driving the line low */
-               if (!value)
-                       return gpiod_direction_input(desc);
+               if (!value) {
+                       ret = gpiod_direction_input(desc);
+                       goto set_output_flag;
+               }
        } else {
                gpio_set_config(gc, gpio_chip_hwgpio(desc),
                                PIN_CONFIG_DRIVE_PUSH_PULL);
@@ -3088,6 +3112,17 @@ int gpiod_direction_output(struct gpio_desc *desc, int value)
 
 set_output_value:
        return gpiod_direction_output_raw_commit(desc, value);
+
+set_output_flag:
+       /*
+        * When emulating open-source or open-drain functionalities by not
+        * actively driving the line (setting mode to input) we still need to
+        * set the IS_OUT flag or otherwise we won't be able to set the line
+        * value anymore.
+        */
+       if (ret == 0)
+               set_bit(FLAG_IS_OUT, &desc->flags);
+       return ret;
 }
 EXPORT_SYMBOL_GPL(gpiod_direction_output);
 
@@ -3448,8 +3483,6 @@ static void gpio_set_open_drain_value_commit(struct gpio_desc *desc, bool value)
 
        if (value) {
                ret = chip->direction_input(chip, offset);
-               if (!ret)
-                       clear_bit(FLAG_IS_OUT, &desc->flags);
        } else {
                ret = chip->direction_output(chip, offset, 0);
                if (!ret)
@@ -3479,8 +3512,6 @@ static void gpio_set_open_source_value_commit(struct gpio_desc *desc, bool value
                        set_bit(FLAG_IS_OUT, &desc->flags);
        } else {
                ret = chip->direction_input(chip, offset);
-               if (!ret)
-                       clear_bit(FLAG_IS_OUT, &desc->flags);
        }
        trace_gpio_direction(desc_to_gpio(desc), !value, ret);
        if (ret < 0)
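
The gpiod_direction_output() and *_value_commit() hunks above keep FLAG_IS_OUT set while open-drain or open-source outputs are emulated through input mode. From a consumer's point of view the line keeps behaving like an output; a rough sketch, assuming 'desc' was requested as an (emulated) open-drain output:

/* Rough consumer-side sketch only. */
static int example_toggle_open_drain(struct gpio_desc *desc)
{
        int ret;

        /* "High" is emulated by switching the pin to input... */
        ret = gpiod_direction_output(desc, 1);
        if (ret)
                return ret;

        /* ...but the descriptor still counts as an output, so this works. */
        gpiod_set_value(desc, 0);
        gpiod_set_value(desc, 1);
        return 0;
}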
index 42e2c1f57152d7233d529c3ea8a81d22e18b1d53..00962a659009b27f81c5e68d5b1932c9c1e563ff 100644 (file)
@@ -54,7 +54,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
        amdgpu_gtt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o amdgpu_atomfirmware.o \
        amdgpu_vf_error.o amdgpu_sched.o amdgpu_debugfs.o amdgpu_ids.o \
        amdgpu_gmc.o amdgpu_xgmi.o amdgpu_csa.o amdgpu_ras.o amdgpu_vm_cpu.o \
-       amdgpu_vm_sdma.o amdgpu_pmu.o amdgpu_discovery.o amdgpu_ras_eeprom.o smu_v11_0_i2c.o
+       amdgpu_vm_sdma.o amdgpu_discovery.o amdgpu_ras_eeprom.o smu_v11_0_i2c.o
 
 amdgpu-$(CONFIG_PERF_EVENTS) += amdgpu_pmu.o
 
index eba42c752bca32dacdb437f2ce2ec2ba5c0f2da4..82155ac3288a07596fe4b36ae3846704c4c64b7c 100644 (file)
@@ -189,7 +189,7 @@ static int acp_hw_init(void *handle)
        u32 val = 0;
        u32 count = 0;
        struct device *dev;
-       struct i2s_platform_data *i2s_pdata;
+       struct i2s_platform_data *i2s_pdata = NULL;
 
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
@@ -231,20 +231,21 @@ static int acp_hw_init(void *handle)
        adev->acp.acp_cell = kcalloc(ACP_DEVS, sizeof(struct mfd_cell),
                                                        GFP_KERNEL);
 
-       if (adev->acp.acp_cell == NULL)
-               return -ENOMEM;
+       if (adev->acp.acp_cell == NULL) {
+               r = -ENOMEM;
+               goto failure;
+       }
 
        adev->acp.acp_res = kcalloc(5, sizeof(struct resource), GFP_KERNEL);
        if (adev->acp.acp_res == NULL) {
-               kfree(adev->acp.acp_cell);
-               return -ENOMEM;
+               r = -ENOMEM;
+               goto failure;
        }
 
        i2s_pdata = kcalloc(3, sizeof(struct i2s_platform_data), GFP_KERNEL);
        if (i2s_pdata == NULL) {
-               kfree(adev->acp.acp_res);
-               kfree(adev->acp.acp_cell);
-               return -ENOMEM;
+               r = -ENOMEM;
+               goto failure;
        }
 
        switch (adev->asic_type) {
@@ -341,14 +342,14 @@ static int acp_hw_init(void *handle)
        r = mfd_add_hotplug_devices(adev->acp.parent, adev->acp.acp_cell,
                                                                ACP_DEVS);
        if (r)
-               return r;
+               goto failure;
 
        for (i = 0; i < ACP_DEVS ; i++) {
                dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);
                r = pm_genpd_add_device(&adev->acp.acp_genpd->gpd, dev);
                if (r) {
                        dev_err(dev, "Failed to add dev to genpd\n");
-                       return r;
+                       goto failure;
                }
        }
 
@@ -367,7 +368,8 @@ static int acp_hw_init(void *handle)
                        break;
                if (--count == 0) {
                        dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
-                       return -ETIMEDOUT;
+                       r = -ETIMEDOUT;
+                       goto failure;
                }
                udelay(100);
        }
@@ -384,7 +386,8 @@ static int acp_hw_init(void *handle)
                        break;
                if (--count == 0) {
                        dev_err(&adev->pdev->dev, "Failed to reset ACP\n");
-                       return -ETIMEDOUT;
+                       r = -ETIMEDOUT;
+                       goto failure;
                }
                udelay(100);
        }
@@ -393,6 +396,13 @@ static int acp_hw_init(void *handle)
        val &= ~ACP_SOFT_RESET__SoftResetAud_MASK;
        cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val);
        return 0;
+
+failure:
+       kfree(i2s_pdata);
+       kfree(adev->acp.acp_res);
+       kfree(adev->acp.acp_cell);
+       kfree(adev->acp.acp_genpd);
+       return r;
 }
 
 /**
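
The acp_hw_init() rework above funnels every failure through a single label and pre-initializes i2s_pdata to NULL, relying on kfree(NULL) being a no-op so the label can free everything unconditionally. A generic sketch of that idiom with invented names and sizes:

/* Generic sketch: one failure label, unconditional kfree() of all pointers. */
static int example_alloc_pair(void **pa, void **pb)
{
        void *a = NULL, *b = NULL;
        int r;

        a = kzalloc(32, GFP_KERNEL);
        if (!a) {
                r = -ENOMEM;
                goto failure;
        }

        b = kzalloc(64, GFP_KERNEL);
        if (!b) {
                r = -ENOMEM;
                goto failure;
        }

        *pa = a;
        *pb = b;
        return 0;

failure:
        kfree(b);       /* safe: may still be NULL */
        kfree(a);
        return r;
}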
index 7bcf86c61999513c13c72bcdc1fccc3c24633ff9..85b0515c0fdcf7c0e69558988fdbb534c9efa302 100644 (file)
@@ -140,7 +140,12 @@ int amdgpu_bo_list_create(struct amdgpu_device *adev, struct drm_file *filp,
        return 0;
 
 error_free:
-       while (i--) {
+       for (i = 0; i < last_entry; ++i) {
+               struct amdgpu_bo *bo = ttm_to_amdgpu_bo(array[i].tv.bo);
+
+               amdgpu_bo_unref(&bo);
+       }
+       for (i = first_userptr; i < num_entries; ++i) {
                struct amdgpu_bo *bo = ttm_to_amdgpu_bo(array[i].tv.bo);
 
                amdgpu_bo_unref(&bo);
@@ -270,7 +275,7 @@ int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
 
        r = amdgpu_bo_create_list_entry_array(&args->in, &info);
        if (r)
-               goto error_free;
+               return r;
 
        switch (args->in.operation) {
        case AMDGPU_BO_LIST_OP_CREATE:
@@ -283,8 +288,7 @@ int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
                r = idr_alloc(&fpriv->bo_list_handles, list, 1, 0, GFP_KERNEL);
                mutex_unlock(&fpriv->bo_list_lock);
                if (r < 0) {
-                       amdgpu_bo_list_put(list);
-                       return r;
+                       goto error_put_list;
                }
 
                handle = r;
@@ -306,9 +310,8 @@ int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
                mutex_unlock(&fpriv->bo_list_lock);
 
                if (IS_ERR(old)) {
-                       amdgpu_bo_list_put(list);
                        r = PTR_ERR(old);
-                       goto error_free;
+                       goto error_put_list;
                }
 
                amdgpu_bo_list_put(old);
@@ -325,8 +328,10 @@ int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
 
        return 0;
 
+error_put_list:
+       amdgpu_bo_list_put(list);
+
 error_free:
-       if (info)
-               kvfree(info);
+       kvfree(info);
        return r;
 }
index 2e53feed40e230f7d7f0733678932ef07873db0f..82823d9a8ba8874e33959ef389465210a91f46f2 100644 (file)
@@ -536,7 +536,6 @@ static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
 
        list_for_each_entry(lobj, validated, tv.head) {
                struct amdgpu_bo *bo = ttm_to_amdgpu_bo(lobj->tv.bo);
-               bool binding_userptr = false;
                struct mm_struct *usermm;
 
                usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);
@@ -553,7 +552,6 @@ static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
 
                        amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm,
                                                     lobj->user_pages);
-                       binding_userptr = true;
                }
 
                if (p->evictable == lobj)
@@ -563,10 +561,8 @@ static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
                if (r)
                        return r;
 
-               if (binding_userptr) {
-                       kvfree(lobj->user_pages);
-                       lobj->user_pages = NULL;
-               }
+               kvfree(lobj->user_pages);
+               lobj->user_pages = NULL;
        }
        return 0;
 }
index 264677ab248a1af2d3a816132b64aca4d4ffd567..2a00a36106b2d6c78cd6536d2d79ae7933dc4bdc 100644 (file)
  * - 3.32.0 - Add syncobj timeline support to AMDGPU_CS.
  * - 3.33.0 - Fixes for GDS ENOMEM failures in AMDGPU_CS.
  * - 3.34.0 - Non-DC can flip correctly between buffers with different pitches
+ * - 3.35.0 - Add drm_amdgpu_info_device::tcc_disabled_mask
  */
 #define KMS_DRIVER_MAJOR       3
-#define KMS_DRIVER_MINOR       34
+#define KMS_DRIVER_MINOR       35
 #define KMS_DRIVER_PATCHLEVEL  0
 
 #define AMDGPU_MAX_TIMEOUT_PARAM_LENTH 256
@@ -1047,6 +1048,41 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
                return -ENODEV;
        }
 
+#ifdef CONFIG_DRM_AMDGPU_SI
+       if (!amdgpu_si_support) {
+               switch (flags & AMD_ASIC_MASK) {
+               case CHIP_TAHITI:
+               case CHIP_PITCAIRN:
+               case CHIP_VERDE:
+               case CHIP_OLAND:
+               case CHIP_HAINAN:
+                       dev_info(&pdev->dev,
+                                "SI support provided by radeon.\n");
+                       dev_info(&pdev->dev,
+                                "Use radeon.si_support=0 amdgpu.si_support=1 to override.\n"
+                               );
+                       return -ENODEV;
+               }
+       }
+#endif
+#ifdef CONFIG_DRM_AMDGPU_CIK
+       if (!amdgpu_cik_support) {
+               switch (flags & AMD_ASIC_MASK) {
+               case CHIP_KAVERI:
+               case CHIP_BONAIRE:
+               case CHIP_HAWAII:
+               case CHIP_KABINI:
+               case CHIP_MULLINS:
+                       dev_info(&pdev->dev,
+                                "CIK support provided by radeon.\n");
+                       dev_info(&pdev->dev,
+                                "Use radeon.cik_support=0 amdgpu.cik_support=1 to override.\n"
+                               );
+                       return -ENODEV;
+               }
+       }
+#endif
+
        /* Get rid of things like offb */
        ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, 0, "amdgpudrmfb");
        if (ret)
index 554a59b3c4a65cf964bbaf6d0635ddb1f6fc0106..6ee4021910e2cddbad2900f7bb2fe8860f592100 100644 (file)
@@ -165,6 +165,7 @@ struct amdgpu_gfx_config {
        uint32_t num_sc_per_sh;
        uint32_t num_packer_per_sc;
        uint32_t pa_sc_tile_steering_override;
+       uint64_t tcc_disabled_mask;
 };
 
 struct amdgpu_cu_info {
index 9d76e0923a5a3a4d705a0905b63036344b3f3bd7..96b2a31ccfed357956cd347bc0c1b0723d4e9c53 100644 (file)
@@ -218,7 +218,7 @@ static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
        struct amdgpu_ring *ring = to_amdgpu_ring(sched_job->sched);
        struct dma_fence *fence = NULL, *finished;
        struct amdgpu_job *job;
-       int r;
+       int r = 0;
 
        job = to_amdgpu_job(sched_job);
        finished = &job->base.s_fence->finished;
@@ -243,6 +243,8 @@ static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
        job->fence = dma_fence_get(fence);
 
        amdgpu_job_free_resources(job);
+
+       fence = r ? ERR_PTR(r) : fence;
        return fence;
 }
 
index f6147528be6499913ba829517359a7466dd39aea..d55f5baa83d37343edad07b6946f35fb04c698ac 100644 (file)
@@ -144,41 +144,6 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
        struct amdgpu_device *adev;
        int r, acpi_status;
 
-#ifdef CONFIG_DRM_AMDGPU_SI
-       if (!amdgpu_si_support) {
-               switch (flags & AMD_ASIC_MASK) {
-               case CHIP_TAHITI:
-               case CHIP_PITCAIRN:
-               case CHIP_VERDE:
-               case CHIP_OLAND:
-               case CHIP_HAINAN:
-                       dev_info(dev->dev,
-                                "SI support provided by radeon.\n");
-                       dev_info(dev->dev,
-                                "Use radeon.si_support=0 amdgpu.si_support=1 to override.\n"
-                               );
-                       return -ENODEV;
-               }
-       }
-#endif
-#ifdef CONFIG_DRM_AMDGPU_CIK
-       if (!amdgpu_cik_support) {
-               switch (flags & AMD_ASIC_MASK) {
-               case CHIP_KAVERI:
-               case CHIP_BONAIRE:
-               case CHIP_HAWAII:
-               case CHIP_KABINI:
-               case CHIP_MULLINS:
-                       dev_info(dev->dev,
-                                "CIK support provided by radeon.\n");
-                       dev_info(dev->dev,
-                                "Use radeon.cik_support=0 amdgpu.cik_support=1 to override.\n"
-                               );
-                       return -ENODEV;
-               }
-       }
-#endif
-
        adev = kzalloc(sizeof(struct amdgpu_device), GFP_KERNEL);
        if (adev == NULL) {
                return -ENOMEM;
@@ -787,6 +752,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
                        dev_info.pa_sc_tile_steering_override =
                                adev->gfx.config.pa_sc_tile_steering_override;
 
+               dev_info.tcc_disabled_mask = adev->gfx.config.tcc_disabled_mask;
+
                return copy_to_user(out, &dev_info,
                                    min((size_t)size, sizeof(dev_info))) ? -EFAULT : 0;
        }
index 1fead0e8b8909d28e8fe5b2d5bfe6a59634c6b55..7289e1b4fb600e6728765167327c13e193a1b18a 100644 (file)
@@ -453,7 +453,8 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
                .interruptible = (bp->type != ttm_bo_type_kernel),
                .no_wait_gpu = false,
                .resv = bp->resv,
-               .flags = TTM_OPT_FLAG_ALLOW_RES_EVICT
+               .flags = bp->type != ttm_bo_type_kernel ?
+                       TTM_OPT_FLAG_ALLOW_RES_EVICT : 0
        };
        struct amdgpu_bo *bo;
        unsigned long page_align, size = bp->size;
index b70b3c45bb294f142998aca526e8dcd55920cf15..65044b1b3d4cc2ffce7b5cec9614bd688792a642 100644 (file)
@@ -429,13 +429,14 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
  * Open up a stream for HW test
  */
 int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
+                             struct amdgpu_bo *bo,
                              struct dma_fence **fence)
 {
        const unsigned ib_size_dw = 1024;
        struct amdgpu_job *job;
        struct amdgpu_ib *ib;
        struct dma_fence *f = NULL;
-       uint64_t dummy;
+       uint64_t addr;
        int i, r;
 
        r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
@@ -444,7 +445,7 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
 
        ib = &job->ibs[0];
 
-       dummy = ib->gpu_addr + 1024;
+       addr = amdgpu_bo_gpu_offset(bo);
 
        /* stitch together an VCE create msg */
        ib->length_dw = 0;
@@ -476,8 +477,8 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
 
        ib->ptr[ib->length_dw++] = 0x00000014; /* len */
        ib->ptr[ib->length_dw++] = 0x05000005; /* feedback buffer */
-       ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
-       ib->ptr[ib->length_dw++] = dummy;
+       ib->ptr[ib->length_dw++] = upper_32_bits(addr);
+       ib->ptr[ib->length_dw++] = addr;
        ib->ptr[ib->length_dw++] = 0x00000001;
 
        for (i = ib->length_dw; i < ib_size_dw; ++i)
@@ -1110,13 +1111,20 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
 int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 {
        struct dma_fence *fence = NULL;
+       struct amdgpu_bo *bo = NULL;
        long r;
 
        /* skip vce ring1/2 ib test for now, since it's not reliable */
        if (ring != &ring->adev->vce.ring[0])
                return 0;
 
-       r = amdgpu_vce_get_create_msg(ring, 1, NULL);
+       r = amdgpu_bo_create_reserved(ring->adev, 512, PAGE_SIZE,
+                                     AMDGPU_GEM_DOMAIN_VRAM,
+                                     &bo, NULL, NULL);
+       if (r)
+               return r;
+
+       r = amdgpu_vce_get_create_msg(ring, 1, bo, NULL);
        if (r)
                goto error;
 
@@ -1132,5 +1140,7 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 
 error:
        dma_fence_put(fence);
+       amdgpu_bo_unreserve(bo);
+       amdgpu_bo_unref(&bo);
        return r;
 }
index 30ea54dd91173b57dbf48e9a9d12915f752d0e13..e802f7d9db0a428de7f8192decad0de4cf10b724 100644 (file)
@@ -59,6 +59,7 @@ int amdgpu_vce_entity_init(struct amdgpu_device *adev);
 int amdgpu_vce_suspend(struct amdgpu_device *adev);
 int amdgpu_vce_resume(struct amdgpu_device *adev);
 int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
+                             struct amdgpu_bo *bo,
                              struct dma_fence **fence);
 int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
                               bool direct, struct dma_fence **fence);
index 7a6beb2e7c4e8aede558f98f9a3b37aff0affc7a..3199e4a5ff126a86156df7d4b94cdd1a53795bdf 100644 (file)
@@ -569,13 +569,14 @@ int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
 }
 
 static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
-                             struct dma_fence **fence)
+                                        struct amdgpu_bo *bo,
+                                        struct dma_fence **fence)
 {
        const unsigned ib_size_dw = 16;
        struct amdgpu_job *job;
        struct amdgpu_ib *ib;
        struct dma_fence *f = NULL;
-       uint64_t dummy;
+       uint64_t addr;
        int i, r;
 
        r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
@@ -583,14 +584,14 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
                return r;
 
        ib = &job->ibs[0];
-       dummy = ib->gpu_addr + 1024;
+       addr = amdgpu_bo_gpu_offset(bo);
 
        ib->length_dw = 0;
        ib->ptr[ib->length_dw++] = 0x00000018;
        ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
        ib->ptr[ib->length_dw++] = handle;
-       ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
-       ib->ptr[ib->length_dw++] = dummy;
+       ib->ptr[ib->length_dw++] = upper_32_bits(addr);
+       ib->ptr[ib->length_dw++] = addr;
        ib->ptr[ib->length_dw++] = 0x0000000b;
 
        ib->ptr[ib->length_dw++] = 0x00000014;
@@ -621,13 +622,14 @@ err:
 }
 
 static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
-                               struct dma_fence **fence)
+                                         struct amdgpu_bo *bo,
+                                         struct dma_fence **fence)
 {
        const unsigned ib_size_dw = 16;
        struct amdgpu_job *job;
        struct amdgpu_ib *ib;
        struct dma_fence *f = NULL;
-       uint64_t dummy;
+       uint64_t addr;
        int i, r;
 
        r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
@@ -635,14 +637,14 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
                return r;
 
        ib = &job->ibs[0];
-       dummy = ib->gpu_addr + 1024;
+       addr = amdgpu_bo_gpu_offset(bo);
 
        ib->length_dw = 0;
        ib->ptr[ib->length_dw++] = 0x00000018;
        ib->ptr[ib->length_dw++] = 0x00000001;
        ib->ptr[ib->length_dw++] = handle;
-       ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
-       ib->ptr[ib->length_dw++] = dummy;
+       ib->ptr[ib->length_dw++] = upper_32_bits(addr);
+       ib->ptr[ib->length_dw++] = addr;
        ib->ptr[ib->length_dw++] = 0x0000000b;
 
        ib->ptr[ib->length_dw++] = 0x00000014;
@@ -675,13 +677,20 @@ err:
 int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 {
        struct dma_fence *fence = NULL;
+       struct amdgpu_bo *bo = NULL;
        long r;
 
-       r = amdgpu_vcn_enc_get_create_msg(ring, 1, NULL);
+       r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE,
+                                     AMDGPU_GEM_DOMAIN_VRAM,
+                                     &bo, NULL, NULL);
+       if (r)
+               return r;
+
+       r = amdgpu_vcn_enc_get_create_msg(ring, 1, bo, NULL);
        if (r)
                goto error;
 
-       r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, &fence);
+       r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, bo, &fence);
        if (r)
                goto error;
 
@@ -693,6 +702,8 @@ int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 
 error:
        dma_fence_put(fence);
+       amdgpu_bo_unreserve(bo);
+       amdgpu_bo_unref(&bo);
        return r;
 }
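
Both the VCE and VCN encoder IB tests above now back their create/destroy messages with a real reserved VRAM buffer object instead of pointing 1 KiB past the IB. A trimmed sketch of that flow for the VCN case; the helper name is invented and the real test's error handling is shortened.

/* Trimmed, illustrative sketch of the BO-backed test flow. */
static int example_vcn_enc_test(struct amdgpu_ring *ring)
{
        struct amdgpu_bo *bo = NULL;
        int r;

        r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE,
                                      AMDGPU_GEM_DOMAIN_VRAM,
                                      &bo, NULL, NULL);
        if (r)
                return r;

        /* The messages embed amdgpu_bo_gpu_offset(bo), not ib->gpu_addr + 1024. */
        r = amdgpu_vcn_enc_get_create_msg(ring, 1, bo, NULL);

        amdgpu_bo_unreserve(bo);
        amdgpu_bo_unref(&bo);
        return r;
}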
 
index e2fb141ff2e566bec9017a18bdc2d909a00c72b7..5251352f59228733c6009598638332386eb52a5b 100644 (file)
@@ -603,14 +603,12 @@ void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
        struct ttm_bo_global *glob = adev->mman.bdev.glob;
        struct amdgpu_vm_bo_base *bo_base;
 
-#if 0
        if (vm->bulk_moveable) {
                spin_lock(&glob->lru_lock);
                ttm_bo_bulk_move_lru_tail(&vm->lru_bulk_move);
                spin_unlock(&glob->lru_lock);
                return;
        }
-#endif
 
        memset(&vm->lru_bulk_move, 0, sizeof(vm->lru_bulk_move));
 
index 638c821611abb2f6582bd572c4bc4bb9c5b278dc..8dfc775626a7a080ee68f58f79dc18462db14cc9 100644 (file)
@@ -93,7 +93,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_1[] =
 {
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_4, 0xffffffff, 0x00400014),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_CPF_CLK_CTRL, 0xfcff8fff, 0xf8000100),
-       SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CLK_CTRL, 0xc0000000, 0xc0000100),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CLK_CTRL, 0xcd000000, 0x0d000100),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SQ_CLK_CTRL, 0x60000ff0, 0x60000100),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SQG_CLK_CTRL, 0x40000000, 0x40000100),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_VGT_CLK_CTRL, 0xffff8fff, 0xffff8100),
@@ -140,7 +140,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_1_1[] =
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_4, 0xffffffff, 0x003c0014),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_GS_NGG_CLK_CTRL, 0xffff8fff, 0xffff8100),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_IA_CLK_CTRL, 0xffff0fff, 0xffff0100),
-       SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CLK_CTRL, 0xc0000000, 0xc0000100),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CLK_CTRL, 0xcd000000, 0x0d000100),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SQ_CLK_CTRL, 0xf8ff0fff, 0x60000100),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SQG_CLK_CTRL, 0x40000ff0, 0x40000100),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_VGT_CLK_CTRL, 0xffff8fff, 0xffff8100),
@@ -179,7 +179,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_1_2[] =
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_4, 0x003e001f, 0x003c0014),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_GS_NGG_CLK_CTRL, 0xffff8fff, 0xffff8100),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_IA_CLK_CTRL, 0xffff0fff, 0xffff0100),
-       SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CLK_CTRL, 0xff7f0fff, 0xc0000100),
+       SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_CLK_CTRL, 0xff7f0fff, 0x0d000100),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SQ_CLK_CTRL, 0xffffcfff, 0x60000100),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SQG_CLK_CTRL, 0xffff0fff, 0x40000100),
        SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_VGT_CLK_CTRL, 0xffff8fff, 0xffff8100),
@@ -1691,6 +1691,17 @@ static void gfx_v10_0_tcp_harvest(struct amdgpu_device *adev)
        }
 }
 
+static void gfx_v10_0_get_tcc_info(struct amdgpu_device *adev)
+{
+       /* TCCs are global (not instanced). */
+       uint32_t tcc_disable = RREG32_SOC15(GC, 0, mmCGTS_TCC_DISABLE) |
+                              RREG32_SOC15(GC, 0, mmCGTS_USER_TCC_DISABLE);
+
+       adev->gfx.config.tcc_disabled_mask =
+               REG_GET_FIELD(tcc_disable, CGTS_TCC_DISABLE, TCC_DISABLE) |
+               (REG_GET_FIELD(tcc_disable, CGTS_TCC_DISABLE, HI_TCC_DISABLE) << 16);
+}
+
 static void gfx_v10_0_constants_init(struct amdgpu_device *adev)
 {
        u32 tmp;
@@ -1702,6 +1713,7 @@ static void gfx_v10_0_constants_init(struct amdgpu_device *adev)
 
        gfx_v10_0_setup_rb(adev);
        gfx_v10_0_get_cu_info(adev, &adev->gfx.cu_info);
+       gfx_v10_0_get_tcc_info(adev);
        adev->gfx.config.pa_sc_tile_steering_override =
                gfx_v10_0_init_pa_sc_tile_steering_override(adev);
 
index 8b789f750b72b0ecba603bb2982cd4e8ada03d60..db10640a3b2f3603fd5b0f84b0d329b28836420e 100644 (file)
@@ -151,6 +151,15 @@ static void gfxhub_v2_0_init_cache_regs(struct amdgpu_device *adev)
        WREG32_SOC15(GC, 0, mmGCVM_L2_CNTL2, tmp);
 
        tmp = mmGCVM_L2_CNTL3_DEFAULT;
+       if (adev->gmc.translate_further) {
+               tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL3, BANK_SELECT, 12);
+               tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL3,
+                                   L2_CACHE_BIGK_FRAGMENT_SIZE, 9);
+       } else {
+               tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL3, BANK_SELECT, 9);
+               tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL3,
+                                   L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
+       }
        WREG32_SOC15(GC, 0, mmGCVM_L2_CNTL3, tmp);
 
        tmp = mmGCVM_L2_CNTL4_DEFAULT;
index 241a4e57cf4a993ea1756523c3b69f208f30a9d7..354e6200ca9a129c903399fbebf6848089266bd1 100644 (file)
@@ -309,6 +309,7 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
 
        job->vm_pd_addr = amdgpu_gmc_pd_addr(adev->gart.bo);
        job->vm_needs_flush = true;
+       job->ibs->ptr[job->ibs->length_dw++] = ring->funcs->nop;
        amdgpu_ring_pad_ib(ring, &job->ibs[0]);
        r = amdgpu_job_submit(job, &adev->mman.entity,
                              AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
index 3542c203c3c8f487a0f4dad537fc5f52cdbdf072..b39bea6f54e97b36060029c17bd36cf6cd00760a 100644 (file)
@@ -137,6 +137,15 @@ static void mmhub_v2_0_init_cache_regs(struct amdgpu_device *adev)
        WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL2, tmp);
 
        tmp = mmMMVM_L2_CNTL3_DEFAULT;
+       if (adev->gmc.translate_further) {
+               tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL3, BANK_SELECT, 12);
+               tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL3,
+                                   L2_CACHE_BIGK_FRAGMENT_SIZE, 9);
+       } else {
+               tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL3, BANK_SELECT, 9);
+               tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL3,
+                                   L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
+       }
        WREG32_SOC15(MMHUB, 0, mmMMVM_L2_CNTL3, tmp);
 
        tmp = mmMMVM_L2_CNTL4_DEFAULT;
index 85393a99a848be702ba1d5f0faa44d0a3b139784..de9b995b65b1aac85baa62371903d33c3f8c4d74 100644 (file)
@@ -317,10 +317,12 @@ static int nv_asic_reset(struct amdgpu_device *adev)
        struct smu_context *smu = &adev->smu;
 
        if (nv_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
-               amdgpu_inc_vram_lost(adev);
+               if (!adev->in_suspend)
+                       amdgpu_inc_vram_lost(adev);
                ret = smu_baco_reset(smu);
        } else {
-               amdgpu_inc_vram_lost(adev);
+               if (!adev->in_suspend)
+                       amdgpu_inc_vram_lost(adev);
                ret = nv_asic_mode1_reset(adev);
        }
 
index 78452cf0115df0dc5ad41d93a167c3c83b40db8c..4554e72c83786960ea11eccedd7b89b9372667e6 100644 (file)
@@ -254,6 +254,7 @@ static const struct soc15_reg_golden golden_settings_sdma_4_3[] = {
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0),
+       SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x00000000)
 };
 
 static u32 sdma_v4_0_get_reg_offset(struct amdgpu_device *adev,
index fa2f70ce2e2b49bfa5a4c1d116b10a78ddeedb79..f6e81680dd7e8edd03cc2ada50008db924d93692 100644 (file)
@@ -1129,7 +1129,7 @@ static void sdma_v5_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
        amdgpu_ring_write(ring, addr & 0xfffffffc);
        amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
        amdgpu_ring_write(ring, seq); /* reference */
-       amdgpu_ring_write(ring, 0xfffffff); /* mask */
+       amdgpu_ring_write(ring, 0xffffffff); /* mask */
        amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
                          SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */
 }
index f70658a536a933cc6bc7f64b8504309dbb617ca1..f8ab80c8801b1bfbce2ec1f1735feb5a857cb21e 100644 (file)
@@ -558,12 +558,14 @@ static int soc15_asic_reset(struct amdgpu_device *adev)
 {
        switch (soc15_asic_reset_method(adev)) {
                case AMD_RESET_METHOD_BACO:
-                       amdgpu_inc_vram_lost(adev);
+                       if (!adev->in_suspend)
+                               amdgpu_inc_vram_lost(adev);
                        return soc15_asic_baco_reset(adev);
                case AMD_RESET_METHOD_MODE2:
                        return soc15_mode2_reset(adev);
                default:
-                       amdgpu_inc_vram_lost(adev);
+                       if (!adev->in_suspend)
+                               amdgpu_inc_vram_lost(adev);
                        return soc15_asic_mode1_reset(adev);
        }
 }
@@ -771,8 +773,6 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev)
 #if defined(CONFIG_DRM_AMD_DC)
                 else if (amdgpu_device_has_dc_support(adev))
                         amdgpu_device_ip_block_add(adev, &dm_ip_block);
-#else
-#       warning "Enable CONFIG_DRM_AMD_DC for display support on SOC15."
 #endif
                amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
                break;
index 670784a78512b82a7c0f5e484746308ad1fd904d..217084d56ab8c86e6b1b16b335bc4faf13258136 100644 (file)
@@ -206,13 +206,14 @@ static int uvd_v6_0_enc_ring_test_ring(struct amdgpu_ring *ring)
  * Open up a stream for HW test
  */
 static int uvd_v6_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
+                                      struct amdgpu_bo *bo,
                                       struct dma_fence **fence)
 {
        const unsigned ib_size_dw = 16;
        struct amdgpu_job *job;
        struct amdgpu_ib *ib;
        struct dma_fence *f = NULL;
-       uint64_t dummy;
+       uint64_t addr;
        int i, r;
 
        r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
@@ -220,15 +221,15 @@ static int uvd_v6_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle
                return r;
 
        ib = &job->ibs[0];
-       dummy = ib->gpu_addr + 1024;
+       addr = amdgpu_bo_gpu_offset(bo);
 
        ib->length_dw = 0;
        ib->ptr[ib->length_dw++] = 0x00000018;
        ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
        ib->ptr[ib->length_dw++] = handle;
        ib->ptr[ib->length_dw++] = 0x00010000;
-       ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
-       ib->ptr[ib->length_dw++] = dummy;
+       ib->ptr[ib->length_dw++] = upper_32_bits(addr);
+       ib->ptr[ib->length_dw++] = addr;
 
        ib->ptr[ib->length_dw++] = 0x00000014;
        ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
@@ -268,13 +269,14 @@ err:
  */
 static int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring,
                                        uint32_t handle,
+                                       struct amdgpu_bo *bo,
                                        struct dma_fence **fence)
 {
        const unsigned ib_size_dw = 16;
        struct amdgpu_job *job;
        struct amdgpu_ib *ib;
        struct dma_fence *f = NULL;
-       uint64_t dummy;
+       uint64_t addr;
        int i, r;
 
        r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
@@ -282,15 +284,15 @@ static int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring,
                return r;
 
        ib = &job->ibs[0];
-       dummy = ib->gpu_addr + 1024;
+       addr = amdgpu_bo_gpu_offset(bo);
 
        ib->length_dw = 0;
        ib->ptr[ib->length_dw++] = 0x00000018;
        ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
        ib->ptr[ib->length_dw++] = handle;
        ib->ptr[ib->length_dw++] = 0x00010000;
-       ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
-       ib->ptr[ib->length_dw++] = dummy;
+       ib->ptr[ib->length_dw++] = upper_32_bits(addr);
+       ib->ptr[ib->length_dw++] = addr;
 
        ib->ptr[ib->length_dw++] = 0x00000014;
        ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
@@ -327,13 +329,20 @@ err:
 static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 {
        struct dma_fence *fence = NULL;
+       struct amdgpu_bo *bo = NULL;
        long r;
 
-       r = uvd_v6_0_enc_get_create_msg(ring, 1, NULL);
+       r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE,
+                                     AMDGPU_GEM_DOMAIN_VRAM,
+                                     &bo, NULL, NULL);
+       if (r)
+               return r;
+
+       r = uvd_v6_0_enc_get_create_msg(ring, 1, bo, NULL);
        if (r)
                goto error;
 
-       r = uvd_v6_0_enc_get_destroy_msg(ring, 1, &fence);
+       r = uvd_v6_0_enc_get_destroy_msg(ring, 1, bo, &fence);
        if (r)
                goto error;
 
@@ -345,6 +354,8 @@ static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 
 error:
        dma_fence_put(fence);
+       amdgpu_bo_unreserve(bo);
+       amdgpu_bo_unref(&bo);
        return r;
 }
 
index 01f658fa72c68a904d678fcf9f131dd5f2675924..0995378d8263c495799068bee36b95c00874fe1c 100644 (file)
@@ -214,13 +214,14 @@ static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring)
  * Open up a stream for HW test
  */
 static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
+                                      struct amdgpu_bo *bo,
                                       struct dma_fence **fence)
 {
        const unsigned ib_size_dw = 16;
        struct amdgpu_job *job;
        struct amdgpu_ib *ib;
        struct dma_fence *f = NULL;
-       uint64_t dummy;
+       uint64_t addr;
        int i, r;
 
        r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
@@ -228,15 +229,15 @@ static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle
                return r;
 
        ib = &job->ibs[0];
-       dummy = ib->gpu_addr + 1024;
+       addr = amdgpu_bo_gpu_offset(bo);
 
        ib->length_dw = 0;
        ib->ptr[ib->length_dw++] = 0x00000018;
        ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
        ib->ptr[ib->length_dw++] = handle;
        ib->ptr[ib->length_dw++] = 0x00000000;
-       ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
-       ib->ptr[ib->length_dw++] = dummy;
+       ib->ptr[ib->length_dw++] = upper_32_bits(addr);
+       ib->ptr[ib->length_dw++] = addr;
 
        ib->ptr[ib->length_dw++] = 0x00000014;
        ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
@@ -275,13 +276,14 @@ err:
  * Close up a stream for HW test or if userspace failed to do so
  */
 static int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
-                               struct dma_fence **fence)
+                                       struct amdgpu_bo *bo,
+                                       struct dma_fence **fence)
 {
        const unsigned ib_size_dw = 16;
        struct amdgpu_job *job;
        struct amdgpu_ib *ib;
        struct dma_fence *f = NULL;
-       uint64_t dummy;
+       uint64_t addr;
        int i, r;
 
        r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
@@ -289,15 +291,15 @@ static int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handl
                return r;
 
        ib = &job->ibs[0];
-       dummy = ib->gpu_addr + 1024;
+       addr = amdgpu_bo_gpu_offset(bo);
 
        ib->length_dw = 0;
        ib->ptr[ib->length_dw++] = 0x00000018;
        ib->ptr[ib->length_dw++] = 0x00000001;
        ib->ptr[ib->length_dw++] = handle;
        ib->ptr[ib->length_dw++] = 0x00000000;
-       ib->ptr[ib->length_dw++] = upper_32_bits(dummy);
-       ib->ptr[ib->length_dw++] = dummy;
+       ib->ptr[ib->length_dw++] = upper_32_bits(addr);
+       ib->ptr[ib->length_dw++] = addr;
 
        ib->ptr[ib->length_dw++] = 0x00000014;
        ib->ptr[ib->length_dw++] = 0x00000002;
@@ -334,13 +336,20 @@ err:
 static int uvd_v7_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 {
        struct dma_fence *fence = NULL;
+       struct amdgpu_bo *bo = NULL;
        long r;
 
-       r = uvd_v7_0_enc_get_create_msg(ring, 1, NULL);
+       r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE,
+                                     AMDGPU_GEM_DOMAIN_VRAM,
+                                     &bo, NULL, NULL);
+       if (r)
+               return r;
+
+       r = uvd_v7_0_enc_get_create_msg(ring, 1, bo, NULL);
        if (r)
                goto error;
 
-       r = uvd_v7_0_enc_get_destroy_msg(ring, 1, &fence);
+       r = uvd_v7_0_enc_get_destroy_msg(ring, 1, bo, &fence);
        if (r)
                goto error;
 
@@ -352,6 +361,8 @@ static int uvd_v7_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
 
 error:
        dma_fence_put(fence);
+       amdgpu_bo_unreserve(bo);
+       amdgpu_bo_unref(&bo);
        return r;
 }
 
index 8cab6da512a065b5c1f9c0bd5a0c86cce8238fd2..a52f0b13a2c8a1e6ea8f6682fffc74a2bb114319 100644 (file)
@@ -2385,8 +2385,6 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
 
        if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
                dm->dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
-       if (adev->asic_type == CHIP_RENOIR)
-               dm->dc->debug.disable_stutter = true;
 
        return 0;
 fail:
@@ -6019,7 +6017,9 @@ static void amdgpu_dm_enable_crtc_interrupts(struct drm_device *dev,
        struct drm_crtc *crtc;
        struct drm_crtc_state *old_crtc_state, *new_crtc_state;
        int i;
+#ifdef CONFIG_DEBUG_FS
        enum amdgpu_dm_pipe_crc_source source;
+#endif
 
        for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
                                      new_crtc_state, i) {
index 985633c08a26725a5315557dbadd0f13f3c2fa76..26c6d735cdc78523af4d6186ac05c50cf7845512 100644 (file)
 # It calculates Bandwidth and Watermarks values for HW programming
 #
 
-ifneq ($(call cc-option, -mpreferred-stack-boundary=4),)
-       cc_stack_align := -mpreferred-stack-boundary=4
-else ifneq ($(call cc-option, -mstack-alignment=16),)
-       cc_stack_align := -mstack-alignment=16
-endif
+calcs_ccflags := -mhard-float -msse
 
-calcs_ccflags := -mhard-float -msse $(cc_stack_align)
+ifdef CONFIG_CC_IS_GCC
+ifeq ($(call cc-ifversion, -lt, 0701, y), y)
+IS_OLD_GCC = 1
+endif
+endif
 
-ifdef CONFIG_CC_IS_CLANG
+ifdef IS_OLD_GCC
+# Stack alignment mismatch, proceed with caution.
+# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3
+# (8B stack alignment).
+calcs_ccflags += -mpreferred-stack-boundary=4
+else
 calcs_ccflags += -msse2
 endif
 
index 5d1adeda4d90fa189bfa5dc36bea2bdce30cf72a..4b8819c27fcda3a3011e58f2a5de081cd8ccd25d 100644 (file)
@@ -580,6 +580,10 @@ static bool construct(struct dc *dc,
 #ifdef CONFIG_DRM_AMD_DC_DCN2_0
        // Allocate memory for the vm_helper
        dc->vm_helper = kzalloc(sizeof(struct vm_helper), GFP_KERNEL);
+       if (!dc->vm_helper) {
+               dm_error("%s: failed to create dc->vm_helper\n", __func__);
+               goto fail;
+       }
 
 #endif
        memcpy(&dc->bb_overrides, &init_params->bb_overrides, sizeof(dc->bb_overrides));
index ca20b150afcc237ba21f8ad46f4bc3bb3b1d97d4..9c58670d541402d0f0b5780cfaa22d3dff4346b2 100644 (file)
@@ -2767,6 +2767,15 @@ void core_link_enable_stream(
                                        CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
                                        COLOR_DEPTH_UNDEFINED);
 
+               /* This second call is needed to reconfigure the DIG
+                * as a workaround for the incorrect value being applied
+                * from transmitter control.
+                */
+               if (!dc_is_virtual_signal(pipe_ctx->stream->signal))
+                       stream->link->link_enc->funcs->setup(
+                               stream->link->link_enc,
+                               pipe_ctx->stream->signal);
+
 #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
                if (pipe_ctx->stream->timing.flags.DSC) {
                        if (dc_is_dp_signal(pipe_ctx->stream->signal) ||
index 505967b48e144b7701dc0c70f92299636c6965ad..51991bf26a93cb9ee1484d5a45856331d242c2c4 100644 (file)
@@ -374,6 +374,7 @@ void dal_ddc_service_i2c_query_dp_dual_mode_adaptor(
        enum display_dongle_type *dongle = &sink_cap->dongle_type;
        uint8_t type2_dongle_buf[DP_ADAPTOR_TYPE2_SIZE];
        bool is_type2_dongle = false;
+       int retry_count = 2;
        struct dp_hdmi_dongle_signature_data *dongle_signature;
 
        /* Assume we have no valid DP passive dongle connected */
@@ -386,13 +387,24 @@ void dal_ddc_service_i2c_query_dp_dual_mode_adaptor(
                DP_HDMI_DONGLE_ADDRESS,
                type2_dongle_buf,
                sizeof(type2_dongle_buf))) {
-               *dongle = DISPLAY_DONGLE_DP_DVI_DONGLE;
-               sink_cap->max_hdmi_pixel_clock = DP_ADAPTOR_DVI_MAX_TMDS_CLK;
+               /* Passive HDMI dongles can sometimes fail here without retrying */
+               while (retry_count > 0) {
+                       if (i2c_read(ddc,
+                               DP_HDMI_DONGLE_ADDRESS,
+                               type2_dongle_buf,
+                               sizeof(type2_dongle_buf)))
+                               break;
+                       retry_count--;
+               }
+               if (retry_count == 0) {
+                       *dongle = DISPLAY_DONGLE_DP_DVI_DONGLE;
+                       sink_cap->max_hdmi_pixel_clock = DP_ADAPTOR_DVI_MAX_TMDS_CLK;
 
-               CONN_DATA_DETECT(ddc->link, type2_dongle_buf, sizeof(type2_dongle_buf),
-                               "DP-DVI passive dongle %dMhz: ",
-                               DP_ADAPTOR_DVI_MAX_TMDS_CLK / 1000);
-               return;
+                       CONN_DATA_DETECT(ddc->link, type2_dongle_buf, sizeof(type2_dongle_buf),
+                                       "DP-DVI passive dongle %dMhz: ",
+                                       DP_ADAPTOR_DVI_MAX_TMDS_CLK / 1000);
+                       return;
+               }
        }
 
        /* Check if Type 2 dongle.*/
index 8f70295179ffa2aad20982f28d79da8661ff0496..f25ac17f47fa9f7da19512a19db07297ce91a660 100644 (file)
@@ -404,6 +404,9 @@ bool resource_are_streams_timing_synchronizable(
        if (stream1->view_format != stream2->view_format)
                return false;
 
+       if (stream1->ignore_msa_timing_param || stream2->ignore_msa_timing_param)
+               return false;
+
        return true;
 }
 static bool is_dp_and_hdmi_sharable(
@@ -1540,6 +1543,9 @@ bool dc_is_stream_unchanged(
        if (!are_stream_backends_same(old_stream, stream))
                return false;
 
+       if (old_stream->ignore_msa_timing_param != stream->ignore_msa_timing_param)
+               return false;
+
        return true;
 }
 
index 1787b9bf800a0c70c165a7ff477df485dbdb24de..76d54885374abbf08df4b16041004da78d934afd 100644 (file)
@@ -668,6 +668,7 @@ struct clock_source *dce100_clock_source_create(
                return &clk_src->base;
        }
 
+       kfree(clk_src);
        BREAK_TO_DEBUGGER();
        return NULL;
 }
index 318e9c2e2ca88c56f03c68099cd1a6bc71393736..89620adc81d8b338115b07775a1b7f2452188283 100644 (file)
@@ -714,6 +714,7 @@ struct clock_source *dce110_clock_source_create(
                return &clk_src->base;
        }
 
+       kfree(clk_src);
        BREAK_TO_DEBUGGER();
        return NULL;
 }
index 83e1878161c922108027fdd1a9697a5e164d44cb..21a657e79306362432bf0f3d7a8723d849716e2f 100644 (file)
@@ -687,6 +687,7 @@ struct clock_source *dce112_clock_source_create(
                return &clk_src->base;
        }
 
+       kfree(clk_src);
        BREAK_TO_DEBUGGER();
        return NULL;
 }
index 8b85e5274bbadd97b89691b346bcb98e46d74d45..7c52f7f9196c9595d60064d0b05ecbe41a792dad 100644 (file)
@@ -500,6 +500,7 @@ static struct clock_source *dce120_clock_source_create(
                return &clk_src->base;
        }
 
+       kfree(clk_src);
        BREAK_TO_DEBUGGER();
        return NULL;
 }
index 4625df9f9fd2f867bc248836e3b7c149150db7b7..643ccb0ade006fac359c70aa74e323712771336f 100644 (file)
@@ -701,6 +701,7 @@ struct clock_source *dce80_clock_source_create(
                return &clk_src->base;
        }
 
+       kfree(clk_src);
        BREAK_TO_DEBUGGER();
        return NULL;
 }
index 01c7e30b9ce1786d0f333213e511ee9d45572fff..bbd6e01b3eca41df79079ce78bf426c6ee31c41b 100644 (file)
@@ -393,6 +393,10 @@ bool cm_helper_translate_curve_to_hw_format(
        rgb_resulted[hw_points - 1].green = output_tf->tf_pts.green[start_index];
        rgb_resulted[hw_points - 1].blue = output_tf->tf_pts.blue[start_index];
 
+       rgb_resulted[hw_points].red = rgb_resulted[hw_points - 1].red;
+       rgb_resulted[hw_points].green = rgb_resulted[hw_points - 1].green;
+       rgb_resulted[hw_points].blue = rgb_resulted[hw_points - 1].blue;
+
        // All 3 color channels have same x
        corner_points[0].red.x = dc_fixpt_pow(dc_fixpt_from_int(2),
                                             dc_fixpt_from_int(region_start));
@@ -464,13 +468,6 @@ bool cm_helper_translate_curve_to_hw_format(
 
        i = 1;
        while (i != hw_points + 1) {
-               if (dc_fixpt_lt(rgb_plus_1->red, rgb->red))
-                       rgb_plus_1->red = rgb->red;
-               if (dc_fixpt_lt(rgb_plus_1->green, rgb->green))
-                       rgb_plus_1->green = rgb->green;
-               if (dc_fixpt_lt(rgb_plus_1->blue, rgb->blue))
-                       rgb_plus_1->blue = rgb->blue;
-
                rgb->delta_red   = dc_fixpt_sub(rgb_plus_1->red,   rgb->red);
                rgb->delta_green = dc_fixpt_sub(rgb_plus_1->green, rgb->green);
                rgb->delta_blue  = dc_fixpt_sub(rgb_plus_1->blue,  rgb->blue);
@@ -562,6 +559,10 @@ bool cm_helper_translate_curve_to_degamma_hw_format(
        rgb_resulted[hw_points - 1].green = output_tf->tf_pts.green[start_index];
        rgb_resulted[hw_points - 1].blue = output_tf->tf_pts.blue[start_index];
 
+       rgb_resulted[hw_points].red = rgb_resulted[hw_points - 1].red;
+       rgb_resulted[hw_points].green = rgb_resulted[hw_points - 1].green;
+       rgb_resulted[hw_points].blue = rgb_resulted[hw_points - 1].blue;
+
        corner_points[0].red.x = dc_fixpt_pow(dc_fixpt_from_int(2),
                                             dc_fixpt_from_int(region_start));
        corner_points[0].green.x = corner_points[0].red.x;
@@ -624,13 +625,6 @@ bool cm_helper_translate_curve_to_degamma_hw_format(
 
        i = 1;
        while (i != hw_points + 1) {
-               if (dc_fixpt_lt(rgb_plus_1->red, rgb->red))
-                       rgb_plus_1->red = rgb->red;
-               if (dc_fixpt_lt(rgb_plus_1->green, rgb->green))
-                       rgb_plus_1->green = rgb->green;
-               if (dc_fixpt_lt(rgb_plus_1->blue, rgb->blue))
-                       rgb_plus_1->blue = rgb->blue;
-
                rgb->delta_red   = dc_fixpt_sub(rgb_plus_1->red,   rgb->red);
                rgb->delta_green = dc_fixpt_sub(rgb_plus_1->green, rgb->green);
                rgb->delta_blue  = dc_fixpt_sub(rgb_plus_1->blue,  rgb->blue);
index 59305e411a666319bf87f4f621f7ea62b0d7f205..1599bb97111114dc2eec402f7fdb99e377799830 100644 (file)
@@ -786,6 +786,7 @@ struct clock_source *dcn10_clock_source_create(
                return &clk_src->base;
        }
 
+       kfree(clk_src);
        BREAK_TO_DEBUGGER();
        return NULL;
 }
index ddb8d5649e79184f901161668c91fa3098416406..63f3bddba7daaa50ab93dbd9a42fc13327208d84 100644 (file)
@@ -10,15 +10,20 @@ ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT
 DCN20 += dcn20_dsc.o
 endif
 
-ifneq ($(call cc-option, -mpreferred-stack-boundary=4),)
-       cc_stack_align := -mpreferred-stack-boundary=4
-else ifneq ($(call cc-option, -mstack-alignment=16),)
-       cc_stack_align := -mstack-alignment=16
-endif
+CFLAGS_$(AMDDALPATH)/dc/dcn20/dcn20_resource.o := -mhard-float -msse
 
-CFLAGS_$(AMDDALPATH)/dc/dcn20/dcn20_resource.o := -mhard-float -msse $(cc_stack_align)
+ifdef CONFIG_CC_IS_GCC
+ifeq ($(call cc-ifversion, -lt, 0701, y), y)
+IS_OLD_GCC = 1
+endif
+endif
 
-ifdef CONFIG_CC_IS_CLANG
+ifdef IS_OLD_GCC
+# Stack alignment mismatch, proceed with caution.
+# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3
+# (8B stack alignment).
+CFLAGS_$(AMDDALPATH)/dc/dcn20/dcn20_resource.o += -mpreferred-stack-boundary=4
+else
 CFLAGS_$(AMDDALPATH)/dc/dcn20/dcn20_resource.o += -msse2
 endif
 
index b4e3ce22ed5294ee3b96c0df73109f5856e931de..dfb208285a9cdd6cb817afbc3cf0ac3a1964200b 100644 (file)
@@ -814,7 +814,7 @@ static const struct resource_caps res_cap_nv14 = {
                .num_audio = 6,
                .num_stream_encoder = 5,
                .num_pll = 5,
-               .num_dwb = 0,
+               .num_dwb = 1,
                .num_ddc = 5,
 };
 
@@ -1077,6 +1077,7 @@ struct clock_source *dcn20_clock_source_create(
                return &clk_src->base;
        }
 
+       kfree(clk_src);
        BREAK_TO_DEBUGGER();
        return NULL;
 }
index 8cd9de8b1a7a191d662a0a6a5c37c484bc01e5f2..ff50ae71fe277b0052440a9a67d175c3c2293b6a 100644 (file)
@@ -3,7 +3,22 @@
 
 DCN21 = dcn21_hubp.o dcn21_hubbub.o dcn21_resource.o
 
-CFLAGS_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o := -mhard-float -msse -mpreferred-stack-boundary=4
+CFLAGS_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o := -mhard-float -msse
+
+ifdef CONFIG_CC_IS_GCC
+ifeq ($(call cc-ifversion, -lt, 0701, y), y)
+IS_OLD_GCC = 1
+endif
+endif
+
+ifdef IS_OLD_GCC
+# Stack alignment mismatch, proceed with caution.
+# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3
+# (8B stack alignment).
+CFLAGS_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o += -mpreferred-stack-boundary=4
+else
+CFLAGS_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o += -msse2
+endif
 
 AMD_DAL_DCN21 = $(addprefix $(AMDDALPATH)/dc/dcn21/,$(DCN21))
 
index 5b2a65b424036e07935122dfb38d0a529097c5d5..8df251626e22bf10fd3e8924bea490850b04b228 100644 (file)
 # It provides the general basic services required by other DAL
 # subcomponents.
 
-ifneq ($(call cc-option, -mpreferred-stack-boundary=4),)
-       cc_stack_align := -mpreferred-stack-boundary=4
-else ifneq ($(call cc-option, -mstack-alignment=16),)
-       cc_stack_align := -mstack-alignment=16
-endif
+dml_ccflags := -mhard-float -msse
 
-dml_ccflags := -mhard-float -msse $(cc_stack_align)
+ifdef CONFIG_CC_IS_GCC
+ifeq ($(call cc-ifversion, -lt, 0701, y), y)
+IS_OLD_GCC = 1
+endif
+endif
 
-ifdef CONFIG_CC_IS_CLANG
+ifdef IS_OLD_GCC
+# Stack alignment mismatch, proceed with caution.
+# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3
+# (8B stack alignment).
+dml_ccflags += -mpreferred-stack-boundary=4
+else
 dml_ccflags += -msse2
 endif
 
index 649883777f62a6ff9e08c7154ea8204138f05afa..6c6c486b774a4ea8d043ba8cae61a4ac6a36cbd9 100644 (file)
@@ -2577,7 +2577,8 @@ static void dml20_DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPer
                        mode_lib->vba.MinActiveDRAMClockChangeMargin
                                        + mode_lib->vba.DRAMClockChangeLatency;
 
-       if (mode_lib->vba.MinActiveDRAMClockChangeMargin > 0) {
+       if (mode_lib->vba.MinActiveDRAMClockChangeMargin > 50) {
+               mode_lib->vba.DRAMClockChangeWatermark += 25;
                mode_lib->vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vactive;
        } else {
                if (mode_lib->vba.SynchronizedVBlank || mode_lib->vba.NumberOfActivePlanes == 1) {
index 456cd0e3289c6666a6177cb7b125aaf9f02fe1c9..3b6ed60dcd3510115833bb418d1ccef3d614998b 100644 (file)
@@ -39,9 +39,6 @@
  * ways. Unless there is something clearly wrong with it the code should
  * remain as-is as it provides us with a guarantee from HW that it is correct.
  */
-
-typedef unsigned int uint;
-
 typedef struct {
        double DPPCLK;
        double DISPCLK;
@@ -4774,7 +4771,7 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
                                mode_lib->vba.MaximumReadBandwidthWithoutPrefetch = 0.0;
                                mode_lib->vba.MaximumReadBandwidthWithPrefetch = 0.0;
                                for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
-                                       uint m;
+                                       unsigned int m;
 
                                        locals->cursor_bw[k] = 0;
                                        locals->cursor_bw_pre[k] = 0;
@@ -5285,7 +5282,7 @@ static void CalculateWatermarksAndDRAMSpeedChangeSupport(
        double SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank;
        double FullDETBufferingTimeYStutterCriticalPlane = 0;
        double TimeToFinishSwathTransferStutterCriticalPlane = 0;
-       uint k, j;
+       unsigned int k, j;
 
        mode_lib->vba.TotalActiveDPP = 0;
        mode_lib->vba.TotalDCCActiveDPP = 0;
@@ -5507,7 +5504,7 @@ static void CalculateDCFCLKDeepSleep(
                double DPPCLK[],
                double *DCFCLKDeepSleep)
 {
-       uint k;
+       unsigned int k;
        double DisplayPipeLineDeliveryTimeLuma;
        double DisplayPipeLineDeliveryTimeChroma;
        //double   DCFCLKDeepSleepPerPlane[DC__NUM_DPP__MAX];
@@ -5727,7 +5724,7 @@ static void CalculatePixelDeliveryTimes(
                double DisplayPipeRequestDeliveryTimeChromaPrefetch[])
 {
        double req_per_swath_ub;
-       uint k;
+       unsigned int k;
 
        for (k = 0; k < NumberOfActivePlanes; ++k) {
                if (VRatio[k] <= 1) {
@@ -5869,7 +5866,7 @@ static void CalculateMetaAndPTETimes(
        unsigned int dpte_groups_per_row_chroma_ub;
        unsigned int num_group_per_lower_vm_stage;
        unsigned int num_req_per_lower_vm_stage;
-       uint k;
+       unsigned int k;
 
        for (k = 0; k < NumberOfActivePlanes; ++k) {
                if (GPUVMEnable == true) {
index b456cd23c6fa46e48b7a5b40f9d76074e57b4396..970737217e53a18ee4331e3cc3aa570c63a79780 100644 (file)
@@ -1,15 +1,20 @@
 #
 # Makefile for the 'dsc' sub-component of DAL.
 
-ifneq ($(call cc-option, -mpreferred-stack-boundary=4),)
-       cc_stack_align := -mpreferred-stack-boundary=4
-else ifneq ($(call cc-option, -mstack-alignment=16),)
-       cc_stack_align := -mstack-alignment=16
-endif
+dsc_ccflags := -mhard-float -msse
 
-dsc_ccflags := -mhard-float -msse $(cc_stack_align)
+ifdef CONFIG_CC_IS_GCC
+ifeq ($(call cc-ifversion, -lt, 0701, y), y)
+IS_OLD_GCC = 1
+endif
+endif
 
-ifdef CONFIG_CC_IS_CLANG
+ifdef IS_OLD_GCC
+# Stack alignment mismatch, proceed with caution.
+# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3
+# (8B stack alignment).
+dsc_ccflags += -mpreferred-stack-boundary=4
+else
 dsc_ccflags += -msse2
 endif
 
index 33960fb38a5d7324f2637ff435adfa37d48c2bc0..4acf139ea014099beb2616344a462e119203eedb 100644 (file)
@@ -843,6 +843,8 @@ static int smu_sw_init(void *handle)
        smu->smu_baco.state = SMU_BACO_STATE_EXIT;
        smu->smu_baco.platform_support = false;
 
+       mutex_init(&smu->sensor_lock);
+
        smu->watermarks_bitmap = 0;
        smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
        smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
index f1f072012facb18dd156ef964fbe4455c0176de2..d493a3f8c07a63bd570b25905e965f854bf74e90 100644 (file)
@@ -1018,6 +1018,7 @@ static int arcturus_read_sensor(struct smu_context *smu,
        if (!data || !size)
                return -EINVAL;
 
+       mutex_lock(&smu->sensor_lock);
        switch (sensor) {
        case AMDGPU_PP_SENSOR_MAX_FAN_RPM:
                *(uint32_t *)data = pptable->FanMaximumRpm;
@@ -1044,6 +1045,7 @@ static int arcturus_read_sensor(struct smu_context *smu,
        default:
                ret = smu_smc_read_sensor(smu, sensor, data, size);
        }
+       mutex_unlock(&smu->sensor_lock);
 
        return ret;
 }
index d08493b67b67d1d2d67985777ea7003c29593fb6..beacfffbdc3eba36468ca5164540e419473fbd48 100644 (file)
@@ -5098,9 +5098,7 @@ static void vega10_odn_update_soc_table(struct pp_hwmgr *hwmgr,
 
        if (type == PP_OD_EDIT_SCLK_VDDC_TABLE) {
                podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_sclk;
-               for (i = 0; i < podn_vdd_dep->count - 1; i++)
-                       od_vddc_lookup_table->entries[i].us_vdd = podn_vdd_dep->entries[i].vddc;
-               if (od_vddc_lookup_table->entries[i].us_vdd < podn_vdd_dep->entries[i].vddc)
+               for (i = 0; i < podn_vdd_dep->count; i++)
                        od_vddc_lookup_table->entries[i].us_vdd = podn_vdd_dep->entries[i].vddc;
        } else if (type == PP_OD_EDIT_MCLK_VDDC_TABLE) {
                podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_mclk;
index 6109815a0401a7c8835e961a0e43f7335d1d490c..23171a4d9a31cfed921873f504f629580bb3ac38 100644 (file)
@@ -344,6 +344,7 @@ struct smu_context
        const struct smu_funcs          *funcs;
        const struct pptable_funcs      *ppt_funcs;
        struct mutex                    mutex;
+       struct mutex                    sensor_lock;
        uint64_t pool_size;
 
        struct smu_table_context        smu_table;
index 12c0e469bf351e34b4789c59790fa11699bb353f..0b461404af6b489972fd62dca5217c8bc32191cc 100644 (file)
@@ -547,7 +547,7 @@ static int navi10_get_metrics_table(struct smu_context *smu,
        struct smu_table_context *smu_table= &smu->smu_table;
        int ret = 0;
 
-       if (!smu_table->metrics_time || time_after(jiffies, smu_table->metrics_time + HZ / 1000)) {
+       if (!smu_table->metrics_time || time_after(jiffies, smu_table->metrics_time + msecs_to_jiffies(100))) {
                ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0,
                                (void *)smu_table->metrics_table, false);
                if (ret) {
@@ -1386,6 +1386,7 @@ static int navi10_read_sensor(struct smu_context *smu,
        if(!data || !size)
                return -EINVAL;
 
+       mutex_lock(&smu->sensor_lock);
        switch (sensor) {
        case AMDGPU_PP_SENSOR_MAX_FAN_RPM:
                *(uint32_t *)data = pptable->FanMaximumRpm;
@@ -1409,6 +1410,7 @@ static int navi10_read_sensor(struct smu_context *smu,
        default:
                ret = smu_smc_read_sensor(smu, sensor, data, size);
        }
+       mutex_unlock(&smu->sensor_lock);
 
        return ret;
 }
index dc754447f0ddc7a84592ccdfb19e3bddc561061d..23c12018dbc18c582a99610d8575d6555861c7f7 100644 (file)
@@ -655,7 +655,7 @@ static int polaris10_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
                        count = SMU_MAX_SMIO_LEVELS;
                for (level = 0; level < count; level++) {
                        table->SmioTable2.Pattern[level].Voltage =
-                               PP_HOST_TO_SMC_US(data->mvdd_voltage_table.entries[count].value * VOLTAGE_SCALE);
+                               PP_HOST_TO_SMC_US(data->mvdd_voltage_table.entries[level].value * VOLTAGE_SCALE);
                        /* Index into DpmTable.Smio. Drive bits from Smio entry to get this voltage level.*/
                        table->SmioTable2.Pattern[level].Smio =
                                (uint8_t) level;
index 7c960b07746fd28da3f44920a65092f15c517848..ae18fbcb26fb19ce8ad203c649d22c017717c423 100644 (file)
@@ -456,7 +456,7 @@ static int vegam_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
                        count = SMU_MAX_SMIO_LEVELS;
                for (level = 0; level < count; level++) {
                        table->SmioTable2.Pattern[level].Voltage = PP_HOST_TO_SMC_US(
-                                       data->mvdd_voltage_table.entries[count].value * VOLTAGE_SCALE);
+                                       data->mvdd_voltage_table.entries[level].value * VOLTAGE_SCALE);
                        /* Index into DpmTable.Smio. Drive bits from Smio entry to get this voltage level.*/
                        table->SmioTable2.Pattern[level].Smio =
                                (uint8_t) level;
index 64386ee3f878105286077773bcb14def63e4119c..bbd8ebd58434bd2b00316a5d71bbbfd05d456fce 100644 (file)
@@ -3023,6 +3023,7 @@ static int vega20_read_sensor(struct smu_context *smu,
        if(!data || !size)
                return -EINVAL;
 
+       mutex_lock(&smu->sensor_lock);
        switch (sensor) {
        case AMDGPU_PP_SENSOR_MAX_FAN_RPM:
                *(uint32_t *)data = pptable->FanMaximumRpm;
@@ -3048,6 +3049,7 @@ static int vega20_read_sensor(struct smu_context *smu,
        default:
                ret = smu_smc_read_sensor(smu, sensor, data, size);
        }
+       mutex_unlock(&smu->sensor_lock);
 
        return ret;
 }
index 8820ce15ce375df111338db44d59821018cfc51c..ae274902ff92439f048d3d04ef034ef0f29d4b61 100644 (file)
@@ -82,7 +82,8 @@ static void komeda_kms_commit_tail(struct drm_atomic_state *old_state)
 
        drm_atomic_helper_commit_modeset_disables(dev, old_state);
 
-       drm_atomic_helper_commit_planes(dev, old_state, 0);
+       drm_atomic_helper_commit_planes(dev, old_state,
+                                       DRM_PLANE_COMMIT_ACTIVE_ONLY);
 
        drm_atomic_helper_commit_modeset_enables(dev, old_state);
 
index ea26bc9c2d0012e7c4703227464f462b70f1fd4b..b848270e0a1f4323c1a0fd7cc18e11afcd159169 100644 (file)
@@ -564,8 +564,8 @@ komeda_splitter_validate(struct komeda_splitter *splitter,
        }
 
        if (!in_range(&splitter->vsize, dflow->in_h)) {
-               DRM_DEBUG_ATOMIC("split in_in: %d exceed the acceptable range.\n",
-                                dflow->in_w);
+               DRM_DEBUG_ATOMIC("split in_h: %d exceeds the acceptable range.\n",
+                                dflow->in_h);
                return -EINVAL;
        }
 
index 2851cac94d8699883dbaa9a4e9b8f202e33da0eb..b72840c06ab76771593f024f949c05af2bb8fa11 100644 (file)
@@ -43,9 +43,8 @@ komeda_wb_encoder_atomic_check(struct drm_encoder *encoder,
        struct komeda_data_flow_cfg dflow;
        int err;
 
-       if (!writeback_job || !writeback_job->fb) {
+       if (!writeback_job)
                return 0;
-       }
 
        if (!crtc_st->active) {
                DRM_DEBUG_ATOMIC("Cannot write the composition result out on a inactive CRTC.\n");
@@ -166,8 +165,10 @@ static int komeda_wb_connector_add(struct komeda_kms_dev *kms,
                                           &komeda_wb_encoder_helper_funcs,
                                           formats, n_formats);
        komeda_put_fourcc_list(formats);
-       if (err)
+       if (err) {
+               kfree(kwb_conn);
                return err;
+       }
 
        drm_connector_helper_add(&wb_conn->base, &komeda_wb_conn_helper_funcs);
 
index 22c0847986df908d2393d1611b9c6d443e51a40d..875a3a9eabfa1e08068dd265c5dbfc7cf3f36411 100644 (file)
@@ -131,7 +131,7 @@ malidp_mw_encoder_atomic_check(struct drm_encoder *encoder,
        struct drm_framebuffer *fb;
        int i, n_planes;
 
-       if (!conn_state->writeback_job || !conn_state->writeback_job->fb)
+       if (!conn_state->writeback_job)
                return 0;
 
        fb = conn_state->writeback_job->fb;
@@ -248,7 +248,7 @@ void malidp_mw_atomic_commit(struct drm_device *drm,
 
        mw_state = to_mw_state(conn_state);
 
-       if (conn_state->writeback_job && conn_state->writeback_job->fb) {
+       if (conn_state->writeback_job) {
                struct drm_framebuffer *fb = conn_state->writeback_job->fb;
 
                DRM_DEV_DEBUG_DRIVER(drm->dev,
index cebc8e6208208815b9334671ee13b958516d54d9..8a8d605021f04e873118414ea4e13d19a400815d 100644 (file)
@@ -728,6 +728,8 @@ static int tc_set_video_mode(struct tc_data *tc,
        int lower_margin = mode->vsync_start - mode->vdisplay;
        int vsync_len = mode->vsync_end - mode->vsync_start;
        u32 dp0_syncval;
+       u32 bits_per_pixel = 24;
+       u32 in_bw, out_bw;
 
        /*
         * Recommended maximum number of symbols transferred in a transfer unit:
@@ -735,7 +737,10 @@ static int tc_set_video_mode(struct tc_data *tc,
         *              (output active video bandwidth in bytes))
         * Must be less than tu_size.
         */
-       max_tu_symbol = TU_SIZE_RECOMMENDED - 1;
+
+       in_bw = mode->clock * bits_per_pixel / 8;
+       out_bw = tc->link.base.num_lanes * tc->link.base.rate;
+       max_tu_symbol = DIV_ROUND_UP(in_bw * TU_SIZE_RECOMMENDED, out_bw);
 
        dev_dbg(tc->dev, "set mode %dx%d\n",
                mode->hdisplay, mode->vdisplay);
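As a rough illustration of the max_tu_symbol calculation introduced above, using assumed numbers that are not taken from the patch: a 148500 kHz pixel clock at 24 bpp pushed over 2 lanes at a 270000 kHz link rate, taking the recommended TU size as 64 purely for the example:

	in_bw  = 148500 * 24 / 8;                           /* 445500 */
	out_bw = 2 * 270000;                                /* 540000 */
	max_tu_symbol = DIV_ROUND_UP(445500 * 64, 540000);  /* 52.8, rounded up to 53 */

so the TU fill now follows the actual input/output bandwidth ratio instead of the fixed TU_SIZE_RECOMMENDED - 1 ceiling.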
index 419381abbdd16ee8b6a63c56113fc3e32f93b0b4..14aeaf736321003c979c6dbf73e0940c4cc6d1d3 100644 (file)
@@ -430,10 +430,15 @@ static int drm_atomic_connector_check(struct drm_connector *connector,
                return -EINVAL;
        }
 
-       if (writeback_job->out_fence && !writeback_job->fb) {
-               DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] requesting out-fence without framebuffer\n",
-                                connector->base.id, connector->name);
-               return -EINVAL;
+       if (!writeback_job->fb) {
+               if (writeback_job->out_fence) {
+                       DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] requesting out-fence without framebuffer\n",
+                                        connector->base.id, connector->name);
+                       return -EINVAL;
+               }
+
+               drm_writeback_cleanup_job(writeback_job);
+               state->writeback_job = NULL;
        }
 
        return 0;
index 82a4ceed3fcf5a77bbb87b667b7c8375e9a19863..6b0177112e18d97e05514037a9f37f7c13fb7fc9 100644 (file)
@@ -159,6 +159,9 @@ static const struct edid_quirk {
        /* Medion MD 30217 PG */
        { "MED", 0x7b8, EDID_QUIRK_PREFER_LARGE_75 },
 
+       /* Lenovo G50 */
+       { "SDC", 18514, EDID_QUIRK_FORCE_6BPC },
+
        /* Panel in Samsung NP700G7A-S01PL notebook reports 6bpc */
        { "SEC", 0xd033, EDID_QUIRK_FORCE_8BPC },
 
index ff138b6ec48badf5cc6869e86688cb12ea57d16a..43d9e3bb3a94344fd81d7886ec6c2d412d9b7db1 100644 (file)
@@ -324,6 +324,9 @@ void drm_writeback_cleanup_job(struct drm_writeback_job *job)
        if (job->fb)
                drm_framebuffer_put(job->fb);
 
+       if (job->out_fence)
+               dma_fence_put(job->out_fence);
+
        kfree(job);
 }
 EXPORT_SYMBOL(drm_writeback_cleanup_job);
@@ -366,25 +369,29 @@ drm_writeback_signal_completion(struct drm_writeback_connector *wb_connector,
 {
        unsigned long flags;
        struct drm_writeback_job *job;
+       struct dma_fence *out_fence;
 
        spin_lock_irqsave(&wb_connector->job_lock, flags);
        job = list_first_entry_or_null(&wb_connector->job_queue,
                                       struct drm_writeback_job,
                                       list_entry);
-       if (job) {
+       if (job)
                list_del(&job->list_entry);
-               if (job->out_fence) {
-                       if (status)
-                               dma_fence_set_error(job->out_fence, status);
-                       dma_fence_signal(job->out_fence);
-                       dma_fence_put(job->out_fence);
-               }
-       }
+
        spin_unlock_irqrestore(&wb_connector->job_lock, flags);
 
        if (WARN_ON(!job))
                return;
 
+       out_fence = job->out_fence;
+       if (out_fence) {
+               if (status)
+                       dma_fence_set_error(out_fence, status);
+               dma_fence_signal(out_fence);
+               dma_fence_put(out_fence);
+               job->out_fence = NULL;
+       }
+
        INIT_WORK(&job->cleanup_work, cleanup_work);
        queue_work(system_long_wq, &job->cleanup_work);
 }
index 698db540972c3a09ec3f3147bd87eea156681f4a..648cf0207309219362c7d94df777fc00fece30d1 100644 (file)
@@ -180,6 +180,8 @@ void etnaviv_core_dump(struct etnaviv_gem_submit *submit)
                              etnaviv_cmdbuf_get_va(&submit->cmdbuf,
                                        &gpu->mmu_context->cmdbuf_mapping));
 
+       mutex_unlock(&gpu->mmu_context->lock);
+
        /* Reserve space for the bomap */
        if (n_bomap_pages) {
                bomap_start = bomap = iter.data;
@@ -221,8 +223,6 @@ void etnaviv_core_dump(struct etnaviv_gem_submit *submit)
                                         obj->base.size);
        }
 
-       mutex_unlock(&gpu->mmu_context->lock);
-
        etnaviv_core_dump_header(&iter, ETDUMP_BUF_END, iter.data);
 
        dev_coredumpv(gpu->dev, iter.start, iter.data - iter.start, GFP_KERNEL);
index 043111a1d60c4f9712c96abb427bbcfb4816d2e3..f8bf488e9d717360aa21a9f7f3db45ed25dd5278 100644 (file)
@@ -155,9 +155,11 @@ static void etnaviv_iommuv2_dump(struct etnaviv_iommu_context *context, void *bu
 
        memcpy(buf, v2_context->mtlb_cpu, SZ_4K);
        buf += SZ_4K;
-       for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++, buf += SZ_4K)
-               if (v2_context->mtlb_cpu[i] & MMUv2_PTE_PRESENT)
+       for (i = 0; i < MMUv2_MAX_STLB_ENTRIES; i++)
+               if (v2_context->mtlb_cpu[i] & MMUv2_PTE_PRESENT) {
                        memcpy(buf, v2_context->stlb_cpu[i], SZ_4K);
+                       buf += SZ_4K;
+               }
 }
 
 static void etnaviv_iommuv2_restore_nonsec(struct etnaviv_gpu *gpu,
index 35ebae6a1be7ddbbbe9b42d8f16349a082fc155a..3607d348c2980f1332e8a7a02faa80ce6619814c 100644 (file)
@@ -328,12 +328,23 @@ etnaviv_iommu_context_init(struct etnaviv_iommu_global *global,
 
        ret = etnaviv_cmdbuf_suballoc_map(suballoc, ctx, &ctx->cmdbuf_mapping,
                                          global->memory_base);
-       if (ret) {
-               global->ops->free(ctx);
-               return NULL;
+       if (ret)
+               goto out_free;
+
+       if (global->version == ETNAVIV_IOMMU_V1 &&
+           ctx->cmdbuf_mapping.iova > 0x80000000) {
+               dev_err(global->dev,
+                       "command buffer outside valid memory window\n");
+               goto out_unmap;
        }
 
        return ctx;
+
+out_unmap:
+       etnaviv_cmdbuf_suballoc_unmap(ctx, &ctx->cmdbuf_mapping);
+out_free:
+       global->ops->free(ctx);
+       return NULL;
 }
 
 void etnaviv_iommu_restore(struct etnaviv_gpu *gpu,
index efb39f350b19643b5c6d2418a96c08d128d54dee..3250c1b8dccaebe864bd2eecdc8b10a88fa8221a 100644 (file)
@@ -1270,7 +1270,7 @@ static void sanitize_ddc_pin(struct drm_i915_private *dev_priv,
                DRM_DEBUG_KMS("port %c trying to use the same DDC pin (0x%x) as port %c, "
                              "disabling port %c DVI/HDMI support\n",
                              port_name(port), info->alternate_ddc_pin,
-                             port_name(p), port_name(port));
+                             port_name(p), port_name(p));
 
                /*
                 * If we have multiple ports supposedly sharing the
@@ -1278,9 +1278,14 @@ static void sanitize_ddc_pin(struct drm_i915_private *dev_priv,
                 * port. Otherwise they share the same ddc pin and
                 * system couldn't communicate with them separately.
                 *
-                * Give child device order the priority, first come first
-                * served.
+                * Give inverse child device order the priority,
+                * last one wins. Yes, there are real machines
+                * (eg. Asrock B250M-HDV) where VBT has both
+                * port A and port E with the same AUX ch and
+                * we must pick port E :(
                 */
+               info = &dev_priv->vbt.ddi_port_info[p];
+
                info->supports_dvi = false;
                info->supports_hdmi = false;
                info->alternate_ddc_pin = 0;
@@ -1316,7 +1321,7 @@ static void sanitize_aux_ch(struct drm_i915_private *dev_priv,
                DRM_DEBUG_KMS("port %c trying to use the same AUX CH (0x%x) as port %c, "
                              "disabling port %c DP support\n",
                              port_name(port), info->alternate_aux_channel,
-                             port_name(p), port_name(port));
+                             port_name(p), port_name(p));
 
                /*
                 * If we have multiple ports supposedly sharing the
@@ -1324,9 +1329,14 @@ static void sanitize_aux_ch(struct drm_i915_private *dev_priv,
                 * port. Otherwise they share the same aux channel
                 * and system couldn't communicate with them separately.
                 *
-                * Give child device order the priority, first come first
-                * served.
+                * Give inverse child device order the priority,
+                * last one wins. Yes, there are real machines
+                * (eg. Asrock B250M-HDV) where VBT has both
+                * port A and port E with the same AUX ch and
+                * we must pick port E :(
                 */
+               info = &dev_priv->vbt.ddi_port_info[p];
+
                info->supports_dp = false;
                info->alternate_aux_channel = 0;
        }
index b51d1ceb873937761dc16bd2690e3fc3fe85c9db..dfff6f4357b85a7307db60298d3f5b441011fe94 100644 (file)
@@ -3280,7 +3280,20 @@ static int skl_max_plane_width(const struct drm_framebuffer *fb,
        switch (fb->modifier) {
        case DRM_FORMAT_MOD_LINEAR:
        case I915_FORMAT_MOD_X_TILED:
-               return 4096;
+               /*
+                * Validated limit is 4k, but 5k should
+                * work apart from the following features:
+                * - Ytile (already limited to 4k)
+                * - FP16 (already limited to 4k)
+                * - render compression (already limited to 4k)
+                * - KVMR sprite and cursor (don't care)
+                * - horizontal panning (TODO verify this)
+                * - pipe and plane scaling (TODO verify this)
+                */
+               if (cpp == 8)
+                       return 4096;
+               else
+                       return 5120;
        case I915_FORMAT_MOD_Y_TILED_CCS:
        case I915_FORMAT_MOD_Yf_TILED_CCS:
                /* FIXME AUX plane? */
@@ -7261,7 +7274,7 @@ retry:
        pipe_config->fdi_lanes = lane;
 
        intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
-                              link_bw, &pipe_config->fdi_m_n, false);
+                              link_bw, &pipe_config->fdi_m_n, false, false);
 
        ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
        if (ret == -EDEADLK)
@@ -7508,11 +7521,15 @@ void
 intel_link_compute_m_n(u16 bits_per_pixel, int nlanes,
                       int pixel_clock, int link_clock,
                       struct intel_link_m_n *m_n,
-                      bool constant_n)
+                      bool constant_n, bool fec_enable)
 {
-       m_n->tu = 64;
+       u32 data_clock = bits_per_pixel * pixel_clock;
+
+       if (fec_enable)
+               data_clock = intel_dp_mode_to_fec_clock(data_clock);
 
-       compute_m_n(bits_per_pixel * pixel_clock,
+       m_n->tu = 64;
+       compute_m_n(data_clock,
                    link_clock * nlanes * 8,
                    &m_n->gmch_m, &m_n->gmch_n,
                    constant_n);
@@ -9298,7 +9315,6 @@ static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv,
 static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
 {
        struct intel_encoder *encoder;
-       bool pch_ssc_in_use = false;
        bool has_fdi = false;
 
        for_each_intel_encoder(&dev_priv->drm, encoder) {
@@ -9326,22 +9342,24 @@ static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
         * clock hierarchy. That would also allow us to do
         * clock bending finally.
         */
+       dev_priv->pch_ssc_use = 0;
+
        if (spll_uses_pch_ssc(dev_priv)) {
                DRM_DEBUG_KMS("SPLL using PCH SSC\n");
-               pch_ssc_in_use = true;
+               dev_priv->pch_ssc_use |= BIT(DPLL_ID_SPLL);
        }
 
        if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL1)) {
                DRM_DEBUG_KMS("WRPLL1 using PCH SSC\n");
-               pch_ssc_in_use = true;
+               dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL1);
        }
 
        if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL2)) {
                DRM_DEBUG_KMS("WRPLL2 using PCH SSC\n");
-               pch_ssc_in_use = true;
+               dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL2);
        }
 
-       if (pch_ssc_in_use)
+       if (dev_priv->pch_ssc_use)
                return;
 
        if (has_fdi) {
index e57e6969051d063681b35490ea8d281dfdb85868..01fa87ad327092f543b20757e5e7abebc614589a 100644 (file)
@@ -414,7 +414,7 @@ enum phy_fia {
 void intel_link_compute_m_n(u16 bpp, int nlanes,
                            int pixel_clock, int link_clock,
                            struct intel_link_m_n *m_n,
-                           bool constant_n);
+                           bool constant_n, bool fec_enable);
 bool is_ccs_modifier(u64 modifier);
 void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv);
 u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
index 921ad0a2f7ba7c743bea90622216d019846a6f22..57e9f0ba331b4e7a31ecd4898dfc5410020c3980 100644 (file)
@@ -78,8 +78,8 @@
 #define DP_DSC_MAX_ENC_THROUGHPUT_0            340000
 #define DP_DSC_MAX_ENC_THROUGHPUT_1            400000
 
-/* DP DSC FEC Overhead factor = (100 - 2.4)/100 */
-#define DP_DSC_FEC_OVERHEAD_FACTOR             976
+/* DP DSC FEC Overhead factor = 1/(0.972261) */
+#define DP_DSC_FEC_OVERHEAD_FACTOR             972261
 
 /* Compliance test status bits  */
 #define INTEL_DP_RESOLUTION_SHIFT_MASK 0
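The new factor is the FEC efficiency 0.972261 scaled by 10^6, so the intel_dp_mode_to_fec_clock() helper added below amounts to mode_clock * 1000000 / 972261, roughly a 2.85% uplift over the uncompressed mode clock. A quick sanity check with an assumed 148500 kHz mode clock (illustrative only, not from the patch):

	fec_clock = div_u64(mul_u32_u32(148500, 1000000U), 972261);  /* ~152736 kHz */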
@@ -494,6 +494,97 @@ int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
        return 0;
 }
 
+u32 intel_dp_mode_to_fec_clock(u32 mode_clock)
+{
+       return div_u64(mul_u32_u32(mode_clock, 1000000U),
+                      DP_DSC_FEC_OVERHEAD_FACTOR);
+}
+
+static u16 intel_dp_dsc_get_output_bpp(u32 link_clock, u32 lane_count,
+                                      u32 mode_clock, u32 mode_hdisplay)
+{
+       u32 bits_per_pixel, max_bpp_small_joiner_ram;
+       int i;
+
+       /*
+        * Available Link Bandwidth(Kbits/sec) = (NumberOfLanes)*
+        * (LinkSymbolClock)* 8 * (TimeSlotsPerMTP)
+        * for SST -> TimeSlotsPerMTP is 1,
+        * for MST -> TimeSlotsPerMTP has to be calculated
+        */
+       bits_per_pixel = (link_clock * lane_count * 8) /
+                        intel_dp_mode_to_fec_clock(mode_clock);
+       DRM_DEBUG_KMS("Max link bpp: %u\n", bits_per_pixel);
+
+       /* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. width */
+       max_bpp_small_joiner_ram = DP_DSC_MAX_SMALL_JOINER_RAM_BUFFER / mode_hdisplay;
+       DRM_DEBUG_KMS("Max small joiner bpp: %u\n", max_bpp_small_joiner_ram);
+
+       /*
+        * Greatest allowed DSC BPP = MIN (output BPP from available Link BW
+        * check, output bpp from small joiner RAM check)
+        */
+       bits_per_pixel = min(bits_per_pixel, max_bpp_small_joiner_ram);
+
+       /* Error out if the max bpp is less than smallest allowed valid bpp */
+       if (bits_per_pixel < valid_dsc_bpp[0]) {
+               DRM_DEBUG_KMS("Unsupported BPP %u, min %u\n",
+                             bits_per_pixel, valid_dsc_bpp[0]);
+               return 0;
+       }
+
+       /* Find the nearest match in the array of known BPPs from VESA */
+       for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp) - 1; i++) {
+               if (bits_per_pixel < valid_dsc_bpp[i + 1])
+                       break;
+       }
+       bits_per_pixel = valid_dsc_bpp[i];
+
+       /*
+        * Compressed BPP in U6.4 format so multiply by 16, for Gen 11,
+        * fractional part is 0
+        */
+       return bits_per_pixel << 4;
+}
+
+static u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
+                                      int mode_clock, int mode_hdisplay)
+{
+       u8 min_slice_count, i;
+       int max_slice_width;
+
+       if (mode_clock <= DP_DSC_PEAK_PIXEL_RATE)
+               min_slice_count = DIV_ROUND_UP(mode_clock,
+                                              DP_DSC_MAX_ENC_THROUGHPUT_0);
+       else
+               min_slice_count = DIV_ROUND_UP(mode_clock,
+                                              DP_DSC_MAX_ENC_THROUGHPUT_1);
+
+       max_slice_width = drm_dp_dsc_sink_max_slice_width(intel_dp->dsc_dpcd);
+       if (max_slice_width < DP_DSC_MIN_SLICE_WIDTH_VALUE) {
+               DRM_DEBUG_KMS("Unsupported slice width %d by DP DSC Sink device\n",
+                             max_slice_width);
+               return 0;
+       }
+       /* Also take into account max slice width */
+       min_slice_count = min_t(u8, min_slice_count,
+                               DIV_ROUND_UP(mode_hdisplay,
+                                            max_slice_width));
+
+       /* Find the closest match to the valid slice count values */
+       for (i = 0; i < ARRAY_SIZE(valid_dsc_slicecount); i++) {
+               if (valid_dsc_slicecount[i] >
+                   drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
+                                                   false))
+                       break;
+               if (min_slice_count  <= valid_dsc_slicecount[i])
+                       return valid_dsc_slicecount[i];
+       }
+
+       DRM_DEBUG_KMS("Unsupported Slice Count %d\n", min_slice_count);
+       return 0;
+}
+
 static enum drm_mode_status
 intel_dp_mode_valid(struct drm_connector *connector,
                    struct drm_display_mode *mode)
@@ -2226,7 +2317,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
                               adjusted_mode->crtc_clock,
                               pipe_config->port_clock,
                               &pipe_config->dp_m_n,
-                              constant_n);
+                              constant_n, pipe_config->fec_enable);
 
        if (intel_connector->panel.downclock_mode != NULL &&
                dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
@@ -2236,7 +2327,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
                                               intel_connector->panel.downclock_mode->clock,
                                               pipe_config->port_clock,
                                               &pipe_config->dp_m2_n2,
-                                              constant_n);
+                                              constant_n, pipe_config->fec_enable);
        }
 
        if (!HAS_DDI(dev_priv))
@@ -4323,91 +4414,6 @@ intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
                DP_DPRX_ESI_LEN;
 }
 
-u16 intel_dp_dsc_get_output_bpp(int link_clock, u8 lane_count,
-                               int mode_clock, int mode_hdisplay)
-{
-       u16 bits_per_pixel, max_bpp_small_joiner_ram;
-       int i;
-
-       /*
-        * Available Link Bandwidth(Kbits/sec) = (NumberOfLanes)*
-        * (LinkSymbolClock)* 8 * ((100-FECOverhead)/100)*(TimeSlotsPerMTP)
-        * FECOverhead = 2.4%, for SST -> TimeSlotsPerMTP is 1,
-        * for MST -> TimeSlotsPerMTP has to be calculated
-        */
-       bits_per_pixel = (link_clock * lane_count * 8 *
-                         DP_DSC_FEC_OVERHEAD_FACTOR) /
-               mode_clock;
-
-       /* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. width */
-       max_bpp_small_joiner_ram = DP_DSC_MAX_SMALL_JOINER_RAM_BUFFER /
-               mode_hdisplay;
-
-       /*
-        * Greatest allowed DSC BPP = MIN (output BPP from avaialble Link BW
-        * check, output bpp from small joiner RAM check)
-        */
-       bits_per_pixel = min(bits_per_pixel, max_bpp_small_joiner_ram);
-
-       /* Error out if the max bpp is less than smallest allowed valid bpp */
-       if (bits_per_pixel < valid_dsc_bpp[0]) {
-               DRM_DEBUG_KMS("Unsupported BPP %d\n", bits_per_pixel);
-               return 0;
-       }
-
-       /* Find the nearest match in the array of known BPPs from VESA */
-       for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp) - 1; i++) {
-               if (bits_per_pixel < valid_dsc_bpp[i + 1])
-                       break;
-       }
-       bits_per_pixel = valid_dsc_bpp[i];
-
-       /*
-        * Compressed BPP in U6.4 format so multiply by 16, for Gen 11,
-        * fractional part is 0
-        */
-       return bits_per_pixel << 4;
-}
-
-u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
-                               int mode_clock,
-                               int mode_hdisplay)
-{
-       u8 min_slice_count, i;
-       int max_slice_width;
-
-       if (mode_clock <= DP_DSC_PEAK_PIXEL_RATE)
-               min_slice_count = DIV_ROUND_UP(mode_clock,
-                                              DP_DSC_MAX_ENC_THROUGHPUT_0);
-       else
-               min_slice_count = DIV_ROUND_UP(mode_clock,
-                                              DP_DSC_MAX_ENC_THROUGHPUT_1);
-
-       max_slice_width = drm_dp_dsc_sink_max_slice_width(intel_dp->dsc_dpcd);
-       if (max_slice_width < DP_DSC_MIN_SLICE_WIDTH_VALUE) {
-               DRM_DEBUG_KMS("Unsupported slice width %d by DP DSC Sink device\n",
-                             max_slice_width);
-               return 0;
-       }
-       /* Also take into account max slice width */
-       min_slice_count = min_t(u8, min_slice_count,
-                               DIV_ROUND_UP(mode_hdisplay,
-                                            max_slice_width));
-
-       /* Find the closest match to the valid slice count values */
-       for (i = 0; i < ARRAY_SIZE(valid_dsc_slicecount); i++) {
-               if (valid_dsc_slicecount[i] >
-                   drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
-                                                   false))
-                       break;
-               if (min_slice_count  <= valid_dsc_slicecount[i])
-                       return valid_dsc_slicecount[i];
-       }
-
-       DRM_DEBUG_KMS("Unsupported Slice Count %d\n", min_slice_count);
-       return 0;
-}
-
 static void
 intel_pixel_encoding_setup_vsc(struct intel_dp *intel_dp,
                               const struct intel_crtc_state *crtc_state)
index 657bbb1f5ed08f01fea73b3e93d86b2165dc8237..00981fb9414b6a1d4004635ef18ea611b356cd68 100644 (file)
@@ -102,10 +102,6 @@ bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp);
 bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp);
 bool
 intel_dp_get_link_status(struct intel_dp *intel_dp, u8 *link_status);
-u16 intel_dp_dsc_get_output_bpp(int link_clock, u8 lane_count,
-                               int mode_clock, int mode_hdisplay);
-u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp, int mode_clock,
-                               int mode_hdisplay);
 
 bool intel_dp_read_dpcd(struct intel_dp *intel_dp);
 bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp);
@@ -118,4 +114,6 @@ static inline unsigned int intel_dp_unused_lane_mask(int lane_count)
        return ~((1 << lane_count) - 1) & 0xf;
 }
 
+u32 intel_dp_mode_to_fec_clock(u32 mode_clock);
+
 #endif /* __INTEL_DP_H__ */
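Illustrative aside, not part of the patch: the intel_dp_mode_to_fec_clock() helper declared above scales the mode clock up by the FEC overhead (1/0.972261, stored as 972261 ppm in intel_dp.c) rather than derating the link bandwidth by the old factor of 976. A standalone sketch of that arithmetic; the sample clock value is an arbitrary assumption:

#include <stdint.h>
#include <stdio.h>

/* Same constant as the patch: FEC overhead factor of 1/0.972261, in ppm. */
#define DP_DSC_FEC_OVERHEAD_FACTOR 972261ULL

/* Userspace equivalent of intel_dp_mode_to_fec_clock(). */
static uint32_t mode_to_fec_clock(uint32_t mode_clock_khz)
{
	return (uint32_t)((uint64_t)mode_clock_khz * 1000000ULL /
			  DP_DSC_FEC_OVERHEAD_FACTOR);
}

int main(void)
{
	uint32_t mode_clock = 594000; /* example: 4K@60 pixel clock, in kHz */

	/* 594000 * 1000000 / 972261 ~= 610947 kHz, i.e. ~2.85% higher. */
	printf("%u kHz -> %u kHz with FEC overhead\n",
	       mode_clock, mode_to_fec_clock(mode_clock));
	return 0;
}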
index 6df240a01b8c3ba68de001f725a3a6f5446e7bbc..600873c796d093b0d0ee67413343d143669ad240 100644 (file)
@@ -81,7 +81,7 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
                               adjusted_mode->crtc_clock,
                               crtc_state->port_clock,
                               &crtc_state->dp_m_n,
-                              constant_n);
+                              constant_n, crtc_state->fec_enable);
        crtc_state->dp_m_n.tu = slots;
 
        return 0;
@@ -615,7 +615,7 @@ intel_dp_create_fake_mst_encoder(struct intel_digital_port *intel_dig_port, enum
        intel_encoder->type = INTEL_OUTPUT_DP_MST;
        intel_encoder->power_domain = intel_dig_port->base.power_domain;
        intel_encoder->port = intel_dig_port->base.port;
-       intel_encoder->crtc_mask = BIT(pipe);
+       intel_encoder->crtc_mask = 0x7;
        intel_encoder->cloneable = 0;
 
        intel_encoder->compute_config = intel_dp_mst_compute_config;
index b8148f838354cecafe94b768d20bf3c553ad868c..d5a298c3c83b19a31cc9f9deb23bc7dfc2a1e224 100644 (file)
@@ -525,16 +525,31 @@ static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv,
        val = I915_READ(WRPLL_CTL(id));
        I915_WRITE(WRPLL_CTL(id), val & ~WRPLL_PLL_ENABLE);
        POSTING_READ(WRPLL_CTL(id));
+
+       /*
+        * Try to set up the PCH reference clock once all DPLLs
+        * that depend on it have been shut down.
+        */
+       if (dev_priv->pch_ssc_use & BIT(id))
+               intel_init_pch_refclk(dev_priv);
 }
 
 static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv,
                                 struct intel_shared_dpll *pll)
 {
+       enum intel_dpll_id id = pll->info->id;
        u32 val;
 
        val = I915_READ(SPLL_CTL);
        I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE);
        POSTING_READ(SPLL_CTL);
+
+       /*
+        * Try to set up the PCH reference clock once all DPLLs
+        * that depend on it have been shut down.
+        */
+       if (dev_priv->pch_ssc_use & BIT(id))
+               intel_init_pch_refclk(dev_priv);
 }
 
 static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv,
index e7588799fce565d681a7391b98a37ca3a61e3d78..104cf6d42333e5607139fccc16698db20f20fe0f 100644 (file)
@@ -147,11 +147,11 @@ enum intel_dpll_id {
         */
        DPLL_ID_ICL_MGPLL4 = 6,
        /**
-        * @DPLL_ID_TGL_TCPLL5: TGL TC PLL port 5 (TC5)
+        * @DPLL_ID_TGL_MGPLL5: TGL TC PLL port 5 (TC5)
         */
        DPLL_ID_TGL_MGPLL5 = 7,
        /**
-        * @DPLL_ID_TGL_TCPLL6: TGL TC PLL port 6 (TC6)
+        * @DPLL_ID_TGL_MGPLL6: TGL TC PLL port 6 (TC6)
         */
        DPLL_ID_TGL_MGPLL6 = 8,
 };
index dea63be1964f2fe6df25ab60da07f2504dc8fe68..cae25e493128d7ca94b254ef07d9d461e186ae3a 100644 (file)
@@ -1528,6 +1528,7 @@ g4x_sprite_check_scaling(struct intel_crtc_state *crtc_state,
        int src_x, src_w, src_h, crtc_w, crtc_h;
        const struct drm_display_mode *adjusted_mode =
                &crtc_state->base.adjusted_mode;
+       unsigned int stride = plane_state->color_plane[0].stride;
        unsigned int cpp = fb->format->cpp[0];
        unsigned int width_bytes;
        int min_width, min_height;
@@ -1569,9 +1570,9 @@ g4x_sprite_check_scaling(struct intel_crtc_state *crtc_state,
                return -EINVAL;
        }
 
-       if (width_bytes > 4096 || fb->pitches[0] > 4096) {
+       if (stride > 4096) {
                DRM_DEBUG_KMS("Stride (%u) exceeds hardware max with scaling (%u)\n",
-                             fb->pitches[0], 4096);
+                             stride, 4096);
                return -EINVAL;
        }
 
index 261c9bd83f518c9026ef5d13ea5d8982f703fe24..05289edbafe34b8dbaf1890973f345b0aba5c3e0 100644 (file)
@@ -245,11 +245,9 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
 
        wakeref = intel_runtime_pm_get(rpm);
 
-       srcu = intel_gt_reset_trylock(ggtt->vm.gt);
-       if (srcu < 0) {
-               ret = srcu;
+       ret = intel_gt_reset_trylock(ggtt->vm.gt, &srcu);
+       if (ret)
                goto err_rpm;
-       }
 
        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
@@ -318,7 +316,11 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
                intel_wakeref_auto(&i915->ggtt.userfault_wakeref,
                                   msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND));
 
-       i915_vma_set_ggtt_write(vma);
+       if (write) {
+               GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+               i915_vma_set_ggtt_write(vma);
+               obj->mm.dirty = true;
+       }
 
 err_fence:
        i915_vma_unpin_fence(vma);
@@ -362,6 +364,7 @@ err:
                return VM_FAULT_OOM;
        case -ENOSPC:
        case -EFAULT:
+       case -ENODEV: /* bad object, how did you get here! */
                return VM_FAULT_SIGBUS;
        default:
                WARN_ONCE(ret, "unhandled error in %s: %i\n", __func__, ret);
@@ -473,10 +476,16 @@ i915_gem_mmap_gtt(struct drm_file *file,
        if (!obj)
                return -ENOENT;
 
+       if (i915_gem_object_never_bind_ggtt(obj)) {
+               ret = -ENODEV;
+               goto out;
+       }
+
        ret = create_mmap_offset(obj);
        if (ret == 0)
                *offset = drm_vma_node_offset_addr(&obj->base.vma_node);
 
+out:
        i915_gem_object_put(obj);
        return ret;
 }
index 5efb9936e05b512fe9d7dbcd47cf8069ac844e2e..ddf3605bea8eb4f3aaeb8a097507a0c9805acb58 100644 (file)
@@ -152,6 +152,12 @@ i915_gem_object_is_proxy(const struct drm_i915_gem_object *obj)
        return obj->ops->flags & I915_GEM_OBJECT_IS_PROXY;
 }
 
+static inline bool
+i915_gem_object_never_bind_ggtt(const struct drm_i915_gem_object *obj)
+{
+       return obj->ops->flags & I915_GEM_OBJECT_NO_GGTT;
+}
+
 static inline bool
 i915_gem_object_needs_async_cancel(const struct drm_i915_gem_object *obj)
 {
index ede0eb4218a81b9f1c67390afe7b2433afdf7a49..646859fea2246fad71e79f4ce14f8102759f573d 100644 (file)
@@ -32,7 +32,8 @@ struct drm_i915_gem_object_ops {
 #define I915_GEM_OBJECT_HAS_STRUCT_PAGE        BIT(0)
 #define I915_GEM_OBJECT_IS_SHRINKABLE  BIT(1)
 #define I915_GEM_OBJECT_IS_PROXY       BIT(2)
-#define I915_GEM_OBJECT_ASYNC_CANCEL   BIT(3)
+#define I915_GEM_OBJECT_NO_GGTT                BIT(3)
+#define I915_GEM_OBJECT_ASYNC_CANCEL   BIT(4)
 
        /* Interface between the GEM object and its backing storage.
         * get_pages() is called once prior to the use of the associated set
index 92e53c25424c52ec58df03a5df853080ca64eaa1..ad2a63dbcac2f716f10d688d4b34a20c622213b6 100644 (file)
@@ -241,9 +241,6 @@ void i915_gem_resume(struct drm_i915_private *i915)
        mutex_lock(&i915->drm.struct_mutex);
        intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
 
-       i915_gem_restore_gtt_mappings(i915);
-       i915_gem_restore_fences(i915);
-
        if (i915_gem_init_hw(i915))
                goto err_wedged;
 
index 11b231c187c500f104e85bc736a0a1f4b36f2f4a..6b3b50f0f6d98bc856c5e48f41d5d09d3193f2d8 100644 (file)
@@ -702,6 +702,7 @@ i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
 static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
        .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
                 I915_GEM_OBJECT_IS_SHRINKABLE |
+                I915_GEM_OBJECT_NO_GGTT |
                 I915_GEM_OBJECT_ASYNC_CANCEL,
        .get_pages = i915_gem_userptr_get_pages,
        .put_pages = i915_gem_userptr_put_pages,
index d3c6993f4f46f8c71f89d14759ded4f10ca23e11..22aab8593abfa98b08e0e751572562e990c88c75 100644 (file)
@@ -136,6 +136,20 @@ execlists_active(const struct intel_engine_execlists *execlists)
        return READ_ONCE(*execlists->active);
 }
 
+static inline void
+execlists_active_lock_bh(struct intel_engine_execlists *execlists)
+{
+       local_bh_disable(); /* prevent local softirq and lock recursion */
+       tasklet_lock(&execlists->tasklet);
+}
+
+static inline void
+execlists_active_unlock_bh(struct intel_engine_execlists *execlists)
+{
+       tasklet_unlock(&execlists->tasklet);
+       local_bh_enable(); /* restore softirq, and kick ksoftirqd! */
+}
+
 struct i915_request *
 execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists);
 
index 82630db0394b3b43b283bbb5212cb80b6ec87721..4ce8626b140ed8a89142881cd6b85caa88a13f34 100644 (file)
@@ -1197,9 +1197,7 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
                                         struct drm_printer *m)
 {
        struct drm_i915_private *dev_priv = engine->i915;
-       const struct intel_engine_execlists * const execlists =
-               &engine->execlists;
-       unsigned long flags;
+       struct intel_engine_execlists * const execlists = &engine->execlists;
        u64 addr;
 
        if (engine->id == RENDER_CLASS && IS_GEN_RANGE(dev_priv, 4, 7))
@@ -1281,7 +1279,7 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
                                   idx, hws[idx * 2], hws[idx * 2 + 1]);
                }
 
-               spin_lock_irqsave(&engine->active.lock, flags);
+               execlists_active_lock_bh(execlists);
                for (port = execlists->active; (rq = *port); port++) {
                        char hdr[80];
                        int len;
@@ -1309,7 +1307,7 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
                                 hwsp_seqno(rq));
                        print_request(m, rq, hdr);
                }
-               spin_unlock_irqrestore(&engine->active.lock, flags);
+               execlists_active_unlock_bh(execlists);
        } else if (INTEL_GEN(dev_priv) > 6) {
                drm_printf(m, "\tPP_DIR_BASE: 0x%08x\n",
                           ENGINE_READ(engine, RING_PP_DIR_BASE));
@@ -1440,8 +1438,8 @@ int intel_enable_engine_stats(struct intel_engine_cs *engine)
        if (!intel_engine_supports_stats(engine))
                return -ENODEV;
 
-       spin_lock_irqsave(&engine->active.lock, flags);
-       write_seqlock(&engine->stats.lock);
+       execlists_active_lock_bh(execlists);
+       write_seqlock_irqsave(&engine->stats.lock, flags);
 
        if (unlikely(engine->stats.enabled == ~0)) {
                err = -EBUSY;
@@ -1469,8 +1467,8 @@ int intel_enable_engine_stats(struct intel_engine_cs *engine)
        }
 
 unlock:
-       write_sequnlock(&engine->stats.lock);
-       spin_unlock_irqrestore(&engine->active.lock, flags);
+       write_sequnlock_irqrestore(&engine->stats.lock, flags);
+       execlists_active_unlock_bh(execlists);
 
        return err;
 }
index d42584439f515336641f08105f481912d0131239..06a506c29463d67b3b1c23d4834ccc2598fac273 100644 (file)
@@ -234,6 +234,13 @@ static void execlists_init_reg_state(u32 *reg_state,
                                     struct intel_engine_cs *engine,
                                     struct intel_ring *ring);
 
+static void mark_eio(struct i915_request *rq)
+{
+       if (!i915_request_signaled(rq))
+               dma_fence_set_error(&rq->fence, -EIO);
+       i915_request_mark_complete(rq);
+}
+
 static inline u32 intel_hws_preempt_address(struct intel_engine_cs *engine)
 {
        return (i915_ggtt_offset(engine->status_page.vma) +
@@ -631,7 +638,6 @@ execlists_schedule_out(struct i915_request *rq)
        struct intel_engine_cs *cur, *old;
 
        trace_i915_request_out(rq);
-       GEM_BUG_ON(intel_context_inflight(ce) != rq->engine);
 
        old = READ_ONCE(ce->inflight);
        do
@@ -797,6 +803,17 @@ static bool can_merge_rq(const struct i915_request *prev,
        GEM_BUG_ON(prev == next);
        GEM_BUG_ON(!assert_priority_queue(prev, next));
 
+       /*
+        * We do not submit known completed requests. Therefore if the next
+        * request is already completed, we can pretend to merge it in
+        * with the previous context (and we will skip updating the ELSP
+        * and tracking). Thus hopefully keeping the ELSP full with active
+        * contexts, despite the best efforts of preempt-to-busy to confuse
+        * us.
+        */
+       if (i915_request_completed(next))
+               return true;
+
        if (!can_merge_ctx(prev->hw_context, next->hw_context))
                return false;
 
@@ -893,7 +910,7 @@ static void virtual_xfer_breadcrumbs(struct virtual_engine *ve,
 static struct i915_request *
 last_active(const struct intel_engine_execlists *execlists)
 {
-       struct i915_request * const *last = execlists->active;
+       struct i915_request * const *last = READ_ONCE(execlists->active);
 
        while (*last && i915_request_completed(*last))
                last++;
@@ -1172,21 +1189,6 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
                                continue;
                        }
 
-                       if (i915_request_completed(rq)) {
-                               ve->request = NULL;
-                               ve->base.execlists.queue_priority_hint = INT_MIN;
-                               rb_erase_cached(rb, &execlists->virtual);
-                               RB_CLEAR_NODE(rb);
-
-                               rq->engine = engine;
-                               __i915_request_submit(rq);
-
-                               spin_unlock(&ve->base.active.lock);
-
-                               rb = rb_first_cached(&execlists->virtual);
-                               continue;
-                       }
-
                        if (last && !can_merge_rq(last, rq)) {
                                spin_unlock(&ve->base.active.lock);
                                return; /* leave this for another */
@@ -1237,11 +1239,24 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
                                GEM_BUG_ON(ve->siblings[0] != engine);
                        }
 
-                       __i915_request_submit(rq);
-                       if (!i915_request_completed(rq)) {
+                       if (__i915_request_submit(rq)) {
                                submit = true;
                                last = rq;
                        }
+                       i915_request_put(rq);
+
+                       /*
+                        * Hmm, we have a bunch of virtual engine requests,
+                        * but the first one was already completed (thanks
+                        * preempt-to-busy!). Keep looking at the veng queue
+                        * until we have no more relevant requests (i.e.
+                        * the normal submit queue has higher priority).
+                        */
+                       if (!submit) {
+                               spin_unlock(&ve->base.active.lock);
+                               rb = rb_first_cached(&execlists->virtual);
+                               continue;
+                       }
                }
 
                spin_unlock(&ve->base.active.lock);
@@ -1254,8 +1269,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
                int i;
 
                priolist_for_each_request_consume(rq, rn, p, i) {
-                       if (i915_request_completed(rq))
-                               goto skip;
+                       bool merge = true;
 
                        /*
                         * Can we combine this request with the current port?
@@ -1296,14 +1310,23 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
                                    ctx_single_port_submission(rq->hw_context))
                                        goto done;
 
-                               *port = execlists_schedule_in(last, port - execlists->pending);
-                               port++;
+                               merge = false;
                        }
 
-                       last = rq;
-                       submit = true;
-skip:
-                       __i915_request_submit(rq);
+                       if (__i915_request_submit(rq)) {
+                               if (!merge) {
+                                       *port = execlists_schedule_in(last, port - execlists->pending);
+                                       port++;
+                                       last = NULL;
+                               }
+
+                               GEM_BUG_ON(last &&
+                                          !can_merge_ctx(last->hw_context,
+                                                         rq->hw_context));
+
+                               submit = true;
+                               last = rq;
+                       }
                }
 
                rb_erase_cached(&p->node, &execlists->queue);
@@ -1593,8 +1616,11 @@ static void process_csb(struct intel_engine_cs *engine)
 static void __execlists_submission_tasklet(struct intel_engine_cs *const engine)
 {
        lockdep_assert_held(&engine->active.lock);
-       if (!engine->execlists.pending[0])
+       if (!engine->execlists.pending[0]) {
+               rcu_read_lock(); /* protect peeking at execlists->active */
                execlists_dequeue(engine);
+               rcu_read_unlock();
+       }
 }
 
 /*
@@ -2399,10 +2425,14 @@ static void reset_csb_pointers(struct intel_engine_cs *engine)
 
 static struct i915_request *active_request(struct i915_request *rq)
 {
-       const struct list_head * const list = &rq->timeline->requests;
        const struct intel_context * const ce = rq->hw_context;
        struct i915_request *active = NULL;
+       struct list_head *list;
 
+       if (!i915_request_is_active(rq)) /* unwound, but incomplete! */
+               return rq;
+
+       list = &rq->timeline->requests;
        list_for_each_entry_from_reverse(rq, list, link) {
                if (i915_request_completed(rq))
                        break;
@@ -2552,12 +2582,8 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
        __execlists_reset(engine, true);
 
        /* Mark all executing requests as skipped. */
-       list_for_each_entry(rq, &engine->active.requests, sched.link) {
-               if (!i915_request_signaled(rq))
-                       dma_fence_set_error(&rq->fence, -EIO);
-
-               i915_request_mark_complete(rq);
-       }
+       list_for_each_entry(rq, &engine->active.requests, sched.link)
+               mark_eio(rq);
 
        /* Flush the queued requests to the timeline list (for retiring). */
        while ((rb = rb_first_cached(&execlists->queue))) {
@@ -2565,10 +2591,8 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
                int i;
 
                priolist_for_each_request_consume(rq, rn, p, i) {
-                       list_del_init(&rq->sched.link);
+                       mark_eio(rq);
                        __i915_request_submit(rq);
-                       dma_fence_set_error(&rq->fence, -EIO);
-                       i915_request_mark_complete(rq);
                }
 
                rb_erase_cached(&p->node, &execlists->queue);
@@ -2584,13 +2608,15 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
                RB_CLEAR_NODE(rb);
 
                spin_lock(&ve->base.active.lock);
-               if (ve->request) {
-                       ve->request->engine = engine;
-                       __i915_request_submit(ve->request);
-                       dma_fence_set_error(&ve->request->fence, -EIO);
-                       i915_request_mark_complete(ve->request);
+               rq = fetch_and_zero(&ve->request);
+               if (rq) {
+                       mark_eio(rq);
+
+                       rq->engine = engine;
+                       __i915_request_submit(rq);
+                       i915_request_put(rq);
+
                        ve->base.execlists.queue_priority_hint = INT_MIN;
-                       ve->request = NULL;
                }
                spin_unlock(&ve->base.active.lock);
        }
@@ -3594,6 +3620,8 @@ submit_engine:
 static void virtual_submit_request(struct i915_request *rq)
 {
        struct virtual_engine *ve = to_virtual_engine(rq->engine);
+       struct i915_request *old;
+       unsigned long flags;
 
        GEM_TRACE("%s: rq=%llx:%lld\n",
                  ve->base.name,
@@ -3602,15 +3630,31 @@ static void virtual_submit_request(struct i915_request *rq)
 
        GEM_BUG_ON(ve->base.submit_request != virtual_submit_request);
 
-       GEM_BUG_ON(ve->request);
-       GEM_BUG_ON(!list_empty(virtual_queue(ve)));
+       spin_lock_irqsave(&ve->base.active.lock, flags);
+
+       old = ve->request;
+       if (old) { /* background completion event from preempt-to-busy */
+               GEM_BUG_ON(!i915_request_completed(old));
+               __i915_request_submit(old);
+               i915_request_put(old);
+       }
+
+       if (i915_request_completed(rq)) {
+               __i915_request_submit(rq);
+
+               ve->base.execlists.queue_priority_hint = INT_MIN;
+               ve->request = NULL;
+       } else {
+               ve->base.execlists.queue_priority_hint = rq_prio(rq);
+               ve->request = i915_request_get(rq);
 
-       ve->base.execlists.queue_priority_hint = rq_prio(rq);
-       WRITE_ONCE(ve->request, rq);
+               GEM_BUG_ON(!list_empty(virtual_queue(ve)));
+               list_move_tail(&rq->sched.link, virtual_queue(ve));
 
-       list_move_tail(&rq->sched.link, virtual_queue(ve));
+               tasklet_schedule(&ve->base.execlists.tasklet);
+       }
 
-       tasklet_schedule(&ve->base.execlists.tasklet);
+       spin_unlock_irqrestore(&ve->base.active.lock, flags);
 }
 
 static struct ve_bond *
@@ -3631,18 +3675,22 @@ static void
 virtual_bond_execute(struct i915_request *rq, struct dma_fence *signal)
 {
        struct virtual_engine *ve = to_virtual_engine(rq->engine);
+       intel_engine_mask_t allowed, exec;
        struct ve_bond *bond;
 
+       allowed = ~to_request(signal)->engine->mask;
+
        bond = virtual_find_bond(ve, to_request(signal)->engine);
-       if (bond) {
-               intel_engine_mask_t old, new, cmp;
+       if (bond)
+               allowed &= bond->sibling_mask;
 
-               cmp = READ_ONCE(rq->execution_mask);
-               do {
-                       old = cmp;
-                       new = cmp & bond->sibling_mask;
-               } while ((cmp = cmpxchg(&rq->execution_mask, old, new)) != old);
-       }
+       /* Restrict the bonded request to run on only the available engines */
+       exec = READ_ONCE(rq->execution_mask);
+       while (!try_cmpxchg(&rq->execution_mask, &exec, exec & allowed))
+               ;
+
+       /* Prevent the master from being re-run on the bonded engines */
+       to_request(signal)->execution_mask &= ~allowed;
 }
 
 struct intel_context *
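Illustrative aside, not part of the patch: virtual_bond_execute() above now narrows rq->execution_mask with a try_cmpxchg() retry loop instead of the open-coded cmpxchg() loop it replaces. The same lock-free mask-narrowing idiom in portable C11 atomics, with example values:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Atomically AND 'allowed' into *mask, retrying if another writer races.
 * This mirrors the try_cmpxchg() loop on rq->execution_mask. */
static void restrict_mask(_Atomic uint32_t *mask, uint32_t allowed)
{
	uint32_t exec = atomic_load(mask);

	/* On failure, 'exec' is reloaded with the current value. */
	while (!atomic_compare_exchange_weak(mask, &exec, exec & allowed))
		;
}

int main(void)
{
	_Atomic uint32_t execution_mask = 0x0f; /* example: engines 0-3 */

	restrict_mask(&execution_mask, ~0x01u); /* forbid engine 0 */
	printf("execution_mask = 0x%02x\n", (unsigned)atomic_load(&execution_mask));
	return 0;
}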
index b9d84d52e98642b691ede4cc0107574e866e6a7a..8cea42379dd79815b057f7858772422da7fa6f95 100644 (file)
@@ -42,11 +42,10 @@ static void engine_skip_context(struct i915_request *rq)
        struct intel_engine_cs *engine = rq->engine;
        struct i915_gem_context *hung_ctx = rq->gem_context;
 
-       lockdep_assert_held(&engine->active.lock);
-
        if (!i915_request_is_active(rq))
                return;
 
+       lockdep_assert_held(&engine->active.lock);
        list_for_each_entry_continue(rq, &engine->active.requests, sched.link)
                if (rq->gem_context == hung_ctx)
                        i915_request_skip(rq, -EIO);
@@ -123,7 +122,6 @@ void __i915_request_reset(struct i915_request *rq, bool guilty)
                  rq->fence.seqno,
                  yesno(guilty));
 
-       lockdep_assert_held(&rq->engine->active.lock);
        GEM_BUG_ON(i915_request_completed(rq));
 
        if (guilty) {
@@ -1214,10 +1212,8 @@ out:
        intel_runtime_pm_put(&gt->i915->runtime_pm, wakeref);
 }
 
-int intel_gt_reset_trylock(struct intel_gt *gt)
+int intel_gt_reset_trylock(struct intel_gt *gt, int *srcu)
 {
-       int srcu;
-
        might_lock(&gt->reset.backoff_srcu);
        might_sleep();
 
@@ -1232,10 +1228,10 @@ int intel_gt_reset_trylock(struct intel_gt *gt)
 
                rcu_read_lock();
        }
-       srcu = srcu_read_lock(&gt->reset.backoff_srcu);
+       *srcu = srcu_read_lock(&gt->reset.backoff_srcu);
        rcu_read_unlock();
 
-       return srcu;
+       return 0;
 }
 
 void intel_gt_reset_unlock(struct intel_gt *gt, int tag)
index 37a987b1710820f3cce8e46575bde778821dc9ce..52c00199e06941052f28a625269de322acb3defb 100644 (file)
@@ -38,7 +38,7 @@ int intel_engine_reset(struct intel_engine_cs *engine,
 
 void __i915_request_reset(struct i915_request *rq, bool guilty);
 
-int __must_check intel_gt_reset_trylock(struct intel_gt *gt);
+int __must_check intel_gt_reset_trylock(struct intel_gt *gt, int *srcu);
 void intel_gt_reset_unlock(struct intel_gt *gt, int tag);
 
 void intel_gt_set_wedged(struct intel_gt *gt);
index 601c16239fdf050827c3e78b7b402f27e524b3af..bacaa7bb8c9ab6689e69f373d421a2bc26925ced 100644 (file)
@@ -1573,7 +1573,7 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
        struct intel_engine_cs *engine = rq->engine;
        enum intel_engine_id id;
        const int num_engines =
-               IS_HSW_GT1(i915) ? RUNTIME_INFO(i915)->num_engines - 1 : 0;
+               IS_HASWELL(i915) ? RUNTIME_INFO(i915)->num_engines - 1 : 0;
        bool force_restore = false;
        int len;
        u32 *cs;
index 45481eb1fa3c30ccb0e47d414325746e8f1cb929..5f6ec2fd29a0105f387b9f36f37c3762b047240f 100644 (file)
@@ -1063,6 +1063,9 @@ static void gen9_whitelist_build(struct i915_wa_list *w)
 
        /* WaAllowUMDToModifyHDCChicken1:skl,bxt,kbl,glk,cfl */
        whitelist_reg(w, GEN8_HDC_CHICKEN1);
+
+       /* WaSendPushConstantsFromMMIO:skl,bxt */
+       whitelist_reg(w, COMMON_SLICE_CHICKEN2);
 }
 
 static void skl_whitelist_build(struct intel_engine_cs *engine)
index 020696726f9e63703e665b614fb83cf1a8c54a17..bb6f86c7067a61bf934977a38254bb81b606c475 100644 (file)
@@ -1924,6 +1924,11 @@ static int i915_drm_resume(struct drm_device *dev)
        if (ret)
                DRM_ERROR("failed to re-enable GGTT\n");
 
+       mutex_lock(&dev_priv->drm.struct_mutex);
+       i915_gem_restore_gtt_mappings(dev_priv);
+       i915_gem_restore_fences(dev_priv);
+       mutex_unlock(&dev_priv->drm.struct_mutex);
+
        intel_csr_ucode_resume(dev_priv);
 
        i915_restore_state(dev_priv);
index 772154e4073e267bf4368a97c301f6602f75c0d8..953e1d12c23ce2e5fe1a2ee7da8a6fa9e015aa0a 100644 (file)
@@ -1723,6 +1723,8 @@ struct drm_i915_private {
                struct work_struct idle_work;
        } gem;
 
+       u8 pch_ssc_use;
+
        /* For i945gm vblank irq vs. C3 workaround */
        struct {
                struct work_struct work;
index 95e7c52cf8edc430e5c77bd866a8db183e29907c..d0f94f239919e1fefdd6df3e0a4270f7a8fd8952 100644 (file)
@@ -969,6 +969,9 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
 
        lockdep_assert_held(&obj->base.dev->struct_mutex);
 
+       if (i915_gem_object_never_bind_ggtt(obj))
+               return ERR_PTR(-ENODEV);
+
        if (flags & PIN_MAPPABLE &&
            (!view || view->type == I915_GGTT_VIEW_NORMAL)) {
                /* If the required space is larger than the available
index 167a7b56ed5b5e47c8a502d55cf84db127bec51d..6795f1daa3d500d9076d5c7e387bb38bec15caed 100644 (file)
@@ -77,6 +77,12 @@ struct drm_i915_private;
 
 #define I915_GEM_IDLE_TIMEOUT (HZ / 5)
 
+static inline void tasklet_lock(struct tasklet_struct *t)
+{
+       while (!tasklet_trylock(t))
+               cpu_relax();
+}
+
 static inline void __tasklet_disable_sync_once(struct tasklet_struct *t)
 {
        if (!atomic_fetch_inc(&t->count))
index a53777dd371c37ab7ea69c1e958a9a045a983972..1c5506822dc7296a9d247de9cbf0330a55f55cb2 100644 (file)
@@ -194,6 +194,27 @@ static void free_capture_list(struct i915_request *request)
        }
 }
 
+static void remove_from_engine(struct i915_request *rq)
+{
+       struct intel_engine_cs *engine, *locked;
+
+       /*
+        * Virtual engines complicate acquiring the engine timeline lock,
+        * as their rq->engine pointer is not stable until under that
+        * engine lock. The simple ploy we use is to take the lock then
+        * check that the rq still belongs to the newly locked engine.
+        */
+       locked = READ_ONCE(rq->engine);
+       spin_lock(&locked->active.lock);
+       while (unlikely(locked != (engine = READ_ONCE(rq->engine)))) {
+               spin_unlock(&locked->active.lock);
+               spin_lock(&engine->active.lock);
+               locked = engine;
+       }
+       list_del(&rq->sched.link);
+       spin_unlock(&locked->active.lock);
+}
+
 static bool i915_request_retire(struct i915_request *rq)
 {
        struct i915_active_request *active, *next;
@@ -259,9 +280,7 @@ static bool i915_request_retire(struct i915_request *rq)
         * request that we have removed from the HW and put back on a run
         * queue.
         */
-       spin_lock(&rq->engine->active.lock);
-       list_del(&rq->sched.link);
-       spin_unlock(&rq->engine->active.lock);
+       remove_from_engine(rq);
 
        spin_lock(&rq->lock);
        i915_request_mark_complete(rq);
@@ -358,9 +377,10 @@ __i915_request_await_execution(struct i915_request *rq,
        return 0;
 }
 
-void __i915_request_submit(struct i915_request *request)
+bool __i915_request_submit(struct i915_request *request)
 {
        struct intel_engine_cs *engine = request->engine;
+       bool result = false;
 
        GEM_TRACE("%s fence %llx:%lld, current %d\n",
                  engine->name,
@@ -370,6 +390,25 @@ void __i915_request_submit(struct i915_request *request)
        GEM_BUG_ON(!irqs_disabled());
        lockdep_assert_held(&engine->active.lock);
 
+       /*
+        * With the advent of preempt-to-busy, we frequently encounter
+        * requests that we have unsubmitted from HW, but left running
+        * until the next ack and so have completed in the meantime. On
+        * resubmission of that completed request, we can skip
+        * updating the payload, and execlists can even skip submitting
+        * the request.
+        *
+        * We must remove the request from the caller's priority queue,
+        * and the caller must only call us when the request is in their
+        * priority queue, under the active.lock. This ensures that the
+        * request has *not* yet been retired and we can safely move
+        * the request into the engine->active.list where it will be
+        * dropped upon retiring. (Otherwise if resubmit a *retired*
+        * dropped upon retiring. (Otherwise, if we resubmit a *retired*
+        * request, this would be a horrible use-after-free.)
+       if (i915_request_completed(request))
+               goto xfer;
+
        if (i915_gem_context_is_banned(request->gem_context))
                i915_request_skip(request, -EIO);
 
@@ -393,13 +432,18 @@ void __i915_request_submit(struct i915_request *request)
            i915_sw_fence_signaled(&request->semaphore))
                engine->saturated |= request->sched.semaphores;
 
-       /* We may be recursing from the signal callback of another i915 fence */
-       spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
+       engine->emit_fini_breadcrumb(request,
+                                    request->ring->vaddr + request->postfix);
 
-       list_move_tail(&request->sched.link, &engine->active.requests);
+       trace_i915_request_execute(request);
+       engine->serial++;
+       result = true;
+
+xfer:  /* We may be recursing from the signal callback of another i915 fence */
+       spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
 
-       GEM_BUG_ON(test_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags));
-       set_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags);
+       if (!test_and_set_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags))
+               list_move_tail(&request->sched.link, &engine->active.requests);
 
        if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags) &&
            !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &request->fence.flags) &&
@@ -410,12 +454,7 @@ void __i915_request_submit(struct i915_request *request)
 
        spin_unlock(&request->lock);
 
-       engine->emit_fini_breadcrumb(request,
-                                    request->ring->vaddr + request->postfix);
-
-       engine->serial++;
-
-       trace_i915_request_execute(request);
+       return result;
 }
 
 void i915_request_submit(struct i915_request *request)
index 8ac6e1226a5617683d7ffe2fb78d0ff6cb3b15f2..e4dd013761e81331a6ce7a44f35d56956310c9b1 100644 (file)
@@ -292,7 +292,7 @@ int i915_request_await_execution(struct i915_request *rq,
 
 void i915_request_add(struct i915_request *rq);
 
-void __i915_request_submit(struct i915_request *request);
+bool __i915_request_submit(struct i915_request *request);
 void i915_request_submit(struct i915_request *request);
 
 void i915_request_skip(struct i915_request *request, int error);
index fa864d8f2b731842f7072fa4d73bdc2d54fa28c8..15f8bff141f9134c78a2e295a5afffc0a04e9234 100644 (file)
@@ -69,6 +69,7 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
                WARN_ON(!IS_CANNONLAKE(dev_priv) && !IS_COFFEELAKE(dev_priv));
                return PCH_CNP;
        case INTEL_PCH_CMP_DEVICE_ID_TYPE:
+       case INTEL_PCH_CMP2_DEVICE_ID_TYPE:
                DRM_DEBUG_KMS("Found Comet Lake PCH (CMP)\n");
                WARN_ON(!IS_COFFEELAKE(dev_priv));
                /* CometPoint is CNP Compatible */
index e6a2d65f19c620c0a9d12792892c1980fb358b0b..c29c81ec7971c51d628d88da0548e0ef44fc382e 100644 (file)
@@ -41,6 +41,7 @@ enum intel_pch {
 #define INTEL_PCH_CNP_DEVICE_ID_TYPE           0xA300
 #define INTEL_PCH_CNP_LP_DEVICE_ID_TYPE                0x9D80
 #define INTEL_PCH_CMP_DEVICE_ID_TYPE           0x0280
+#define INTEL_PCH_CMP2_DEVICE_ID_TYPE          0x0680
 #define INTEL_PCH_ICP_DEVICE_ID_TYPE           0x3480
 #define INTEL_PCH_MCC_DEVICE_ID_TYPE           0x4B00
 #define INTEL_PCH_MCC2_DEVICE_ID_TYPE          0x3880
index bb6dd54a6ff3ed1e8201114e1a2f857f9f9e1ba1..37593831b539495473c666f5f1e06875879b00fd 100644 (file)
@@ -118,6 +118,12 @@ static void pm_resume(struct drm_i915_private *i915)
        with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
                intel_gt_sanitize(&i915->gt, false);
                i915_gem_sanitize(i915);
+
+               mutex_lock(&i915->drm.struct_mutex);
+               i915_gem_restore_gtt_mappings(i915);
+               i915_gem_restore_fences(i915);
+               mutex_unlock(&i915->drm.struct_mutex);
+
                i915_gem_resume(i915);
        }
 }
index 663ff9f4fac973811bcaa930a574eb27e3c71de0..1e7b1be25bb07a1d6ac2b7134dded4fdac23c68a 100644 (file)
@@ -26,6 +26,8 @@
 #include "dsi_cfg.h"
 #include "msm_kms.h"
 
+#define DSI_RESET_TOGGLE_DELAY_MS 20
+
 static int dsi_get_version(const void __iomem *base, u32 *major, u32 *minor)
 {
        u32 ver;
@@ -986,7 +988,7 @@ static void dsi_sw_reset(struct msm_dsi_host *msm_host)
        wmb(); /* clocks need to be enabled before reset */
 
        dsi_write(msm_host, REG_DSI_RESET, 1);
-       wmb(); /* make sure reset happen */
+       msleep(DSI_RESET_TOGGLE_DELAY_MS); /* make sure reset happens */
        dsi_write(msm_host, REG_DSI_RESET, 0);
 }
 
@@ -1396,7 +1398,7 @@ static void dsi_sw_reset_restore(struct msm_dsi_host *msm_host)
 
        /* dsi controller can only be reset while clocks are running */
        dsi_write(msm_host, REG_DSI_RESET, 1);
-       wmb();  /* make sure reset happen */
+       msleep(DSI_RESET_TOGGLE_DELAY_MS); /* make sure reset happens */
        dsi_write(msm_host, REG_DSI_RESET, 0);
        wmb();  /* controller out of reset */
        dsi_write(msm_host, REG_DSI_CTRL, data0);
index e226324adb697874631cd23f95cff68a3b7d0b39..4bdd63b5710029684ea9d9445c737d1dc71dfa9c 100644 (file)
@@ -1083,7 +1083,7 @@ static const struct dss_features omap34xx_dss_feats = {
 
 static const struct dss_features omap3630_dss_feats = {
        .model                  =       DSS_MODEL_OMAP3,
-       .fck_div_max            =       32,
+       .fck_div_max            =       31,
        .fck_freq_max           =       173000000,
        .dss_fck_multiplier     =       1,
        .parent_clk_name        =       "dpll4_ck",
index fc82a525b071bc3bc9f8e9ac0d3cedbca849bb3d..ee4379729a5b8b0fd376abb6629c9dfee79a7719 100644 (file)
@@ -220,9 +220,17 @@ static const struct of_device_id lb035q02_of_match[] = {
 
 MODULE_DEVICE_TABLE(of, lb035q02_of_match);
 
+static const struct spi_device_id lb035q02_ids[] = {
+       { "lb035q02", 0 },
+       { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(spi, lb035q02_ids);
+
 static struct spi_driver lb035q02_driver = {
        .probe          = lb035q02_probe,
        .remove         = lb035q02_remove,
+       .id_table       = lb035q02_ids,
        .driver         = {
                .name   = "panel-lg-lb035q02",
                .of_match_table = lb035q02_of_match,
@@ -231,7 +239,6 @@ static struct spi_driver lb035q02_driver = {
 
 module_spi_driver(lb035q02_driver);
 
-MODULE_ALIAS("spi:lgphilips,lb035q02");
 MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>");
 MODULE_DESCRIPTION("LG.Philips LB035Q02 LCD Panel driver");
 MODULE_LICENSE("GPL");
index 299b217c83e186493e20466b5ebfae0f6caa916f..20f17e46e65daacb3398977fc18112412218502f 100644 (file)
@@ -230,9 +230,17 @@ static const struct of_device_id nl8048_of_match[] = {
 
 MODULE_DEVICE_TABLE(of, nl8048_of_match);
 
+static const struct spi_device_id nl8048_ids[] = {
+       { "nl8048hl11", 0 },
+       { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(spi, nl8048_ids);
+
 static struct spi_driver nl8048_driver = {
        .probe          = nl8048_probe,
        .remove         = nl8048_remove,
+       .id_table       = nl8048_ids,
        .driver         = {
                .name   = "panel-nec-nl8048hl11",
                .pm     = &nl8048_pm_ops,
@@ -242,7 +250,6 @@ static struct spi_driver nl8048_driver = {
 
 module_spi_driver(nl8048_driver);
 
-MODULE_ALIAS("spi:nec,nl8048hl11");
 MODULE_AUTHOR("Erik Gilling <konkers@android.com>");
 MODULE_DESCRIPTION("NEC-NL8048HL11 Driver");
 MODULE_LICENSE("GPL");
index 305259b5876701dc9db0aab4e472e16306ec3a02..3d5b9c4f68d98a9514ae3902b5b2919207dd1a84 100644 (file)
@@ -684,9 +684,17 @@ static const struct of_device_id acx565akm_of_match[] = {
 
 MODULE_DEVICE_TABLE(of, acx565akm_of_match);
 
+static const struct spi_device_id acx565akm_ids[] = {
+       { "acx565akm", 0 },
+       { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(spi, acx565akm_ids);
+
 static struct spi_driver acx565akm_driver = {
        .probe          = acx565akm_probe,
        .remove         = acx565akm_remove,
+       .id_table       = acx565akm_ids,
        .driver         = {
                .name   = "panel-sony-acx565akm",
                .of_match_table = acx565akm_of_match,
@@ -695,7 +703,6 @@ static struct spi_driver acx565akm_driver = {
 
 module_spi_driver(acx565akm_driver);
 
-MODULE_ALIAS("spi:sony,acx565akm");
 MODULE_AUTHOR("Nokia Corporation");
 MODULE_DESCRIPTION("Sony ACX565AKM LCD Panel Driver");
 MODULE_LICENSE("GPL");
index d7b2e34626efe81435a632711fb8b5dc2e1b3d01..f2baff827f50799a9666634317d756c69c3ca909 100644 (file)
@@ -375,8 +375,7 @@ static const struct of_device_id td028ttec1_of_match[] = {
 MODULE_DEVICE_TABLE(of, td028ttec1_of_match);
 
 static const struct spi_device_id td028ttec1_ids[] = {
-       { "tpo,td028ttec1", 0},
-       { "toppoly,td028ttec1", 0 },
+       { "td028ttec1", 0 },
        { /* sentinel */ }
 };
 
index 84370562910ff5dfcd52dd1896d227464db2d9bb..ba163c779084c40fb3f3eb90c157c2b1a73f937c 100644 (file)
@@ -491,9 +491,17 @@ static const struct of_device_id td043mtea1_of_match[] = {
 
 MODULE_DEVICE_TABLE(of, td043mtea1_of_match);
 
+static const struct spi_device_id td043mtea1_ids[] = {
+       { "td043mtea1", 0 },
+       { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(spi, td043mtea1_ids);
+
 static struct spi_driver td043mtea1_driver = {
        .probe          = td043mtea1_probe,
        .remove         = td043mtea1_remove,
+       .id_table       = td043mtea1_ids,
        .driver         = {
                .name   = "panel-tpo-td043mtea1",
                .pm     = &td043mtea1_pm_ops,
@@ -503,7 +511,6 @@ static struct spi_driver td043mtea1_driver = {
 
 module_spi_driver(td043mtea1_driver);
 
-MODULE_ALIAS("spi:tpo,td043mtea1");
 MODULE_AUTHOR("Gražvydas Ignotas <notasas@gmail.com>");
 MODULE_DESCRIPTION("TPO TD043MTEA1 Panel Driver");
 MODULE_LICENSE("GPL");
index bc2ddeb55f5d86ffbc9cbe1a39e2e90d77ce60df..f21bc8a7ee3a2fe7d6cdef0747079cf450dc6e6d 100644 (file)
@@ -556,11 +556,11 @@ static int panfrost_probe(struct platform_device *pdev)
        return 0;
 
 err_out2:
+       pm_runtime_disable(pfdev->dev);
        panfrost_devfreq_fini(pfdev);
 err_out1:
        panfrost_device_fini(pfdev);
 err_out0:
-       pm_runtime_disable(pfdev->dev);
        drm_dev_put(ddev);
        return err;
 }
index f67ed925c0eff63b2c171dab1fd91574342b516b..8822ec13a0d619f26d9f22fe54321dbfefc88daf 100644 (file)
@@ -208,6 +208,9 @@ static void panfrost_gpu_init_features(struct panfrost_device *pfdev)
        pfdev->features.mem_features = gpu_read(pfdev, GPU_MEM_FEATURES);
        pfdev->features.mmu_features = gpu_read(pfdev, GPU_MMU_FEATURES);
        pfdev->features.thread_features = gpu_read(pfdev, GPU_THREAD_FEATURES);
+       pfdev->features.max_threads = gpu_read(pfdev, GPU_THREAD_MAX_THREADS);
+       pfdev->features.thread_max_workgroup_sz = gpu_read(pfdev, GPU_THREAD_MAX_WORKGROUP_SIZE);
+       pfdev->features.thread_max_barrier_sz = gpu_read(pfdev, GPU_THREAD_MAX_BARRIER_SIZE);
        pfdev->features.coherency_features = gpu_read(pfdev, GPU_COHERENCY_FEATURES);
        for (i = 0; i < 4; i++)
                pfdev->features.texture_features[i] = gpu_read(pfdev, GPU_TEXTURE_FEATURES(i));
index a58551668d9aa8d5c452befbe1796d2f1f23b587..21f34d44aac2c01fcbce39e233bf1115df4f3cd6 100644 (file)
@@ -381,13 +381,19 @@ static void panfrost_job_timedout(struct drm_sched_job *sched_job)
                job_read(pfdev, JS_TAIL_LO(js)),
                sched_job);
 
-       mutex_lock(&pfdev->reset_lock);
+       if (!mutex_trylock(&pfdev->reset_lock))
+               return;
 
-       for (i = 0; i < NUM_JOB_SLOTS; i++)
-               drm_sched_stop(&pfdev->js->queue[i].sched, sched_job);
+       for (i = 0; i < NUM_JOB_SLOTS; i++) {
+               struct drm_gpu_scheduler *sched = &pfdev->js->queue[i].sched;
+
+               drm_sched_stop(sched, sched_job);
+               if (js != i)
+                       /* Ensure any timeouts on other slots have finished */
+                       cancel_delayed_work_sync(&sched->work_tdr);
+       }
 
-       if (sched_job)
-               drm_sched_increase_karma(sched_job);
+       drm_sched_increase_karma(sched_job);
 
        spin_lock_irqsave(&pfdev->js->job_lock, flags);
        for (i = 0; i < NUM_JOB_SLOTS; i++) {
index bdd99056847689c2c88fe67d1435e84662cd0eb6..a3ed64a1f15ecd8fd686b99fce496ddb760ed938 100644 (file)
@@ -224,9 +224,9 @@ static size_t get_pgsize(u64 addr, size_t size)
        return SZ_2M;
 }
 
-void panfrost_mmu_flush_range(struct panfrost_device *pfdev,
-                             struct panfrost_mmu *mmu,
-                             u64 iova, size_t size)
+static void panfrost_mmu_flush_range(struct panfrost_device *pfdev,
+                                    struct panfrost_mmu *mmu,
+                                    u64 iova, size_t size)
 {
        if (mmu->as < 0)
                return;
@@ -406,11 +406,11 @@ addr_to_drm_mm_node(struct panfrost_device *pfdev, int as, u64 addr)
        spin_lock(&pfdev->as_lock);
        list_for_each_entry(mmu, &pfdev->as_lru_list, list) {
                if (as == mmu->as)
-                       break;
+                       goto found_mmu;
        }
-       if (as != mmu->as)
-               goto out;
+       goto out;
 
+found_mmu:
        priv = container_of(mmu, struct panfrost_file_priv, mmu);
 
        spin_lock(&priv->mm_lock);
@@ -432,7 +432,8 @@ out:
 
 #define NUM_FAULT_PAGES (SZ_2M / PAGE_SIZE)
 
-int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as, u64 addr)
+static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
+                                      u64 addr)
 {
        int ret, i;
        struct panfrost_gem_object *bo;
index 83c57d325ca8754519b4949ca817c9187018db16..2dba192bf198454982d54ab9403f253e0529c652 100644 (file)
@@ -16,6 +16,7 @@
 #include "panfrost_issues.h"
 #include "panfrost_job.h"
 #include "panfrost_mmu.h"
+#include "panfrost_perfcnt.h"
 #include "panfrost_regs.h"
 
 #define COUNTERS_PER_BLOCK             64
index d0bc91ed7c90a73b898e57a6a77a4786c11ed169..4528f4dc0b2d5e4f7fbd3c635ddef9f050f05292 100644 (file)
@@ -379,7 +379,9 @@ radeon_pci_remove(struct pci_dev *pdev)
 static void
 radeon_pci_shutdown(struct pci_dev *pdev)
 {
+#ifdef CONFIG_PPC64
        struct drm_device *ddev = pci_get_drvdata(pdev);
+#endif
 
        /* if we are running in a VM, make sure the device
         * torn down properly on reboot/shutdown
@@ -387,11 +389,15 @@ radeon_pci_shutdown(struct pci_dev *pdev)
        if (radeon_device_is_virtual())
                radeon_pci_remove(pdev);
 
+#ifdef CONFIG_PPC64
        /* Some adapters need to be suspended before a
-       * shutdown occurs in order to prevent an error
-       * during kexec.
-       */
+        * shutdown occurs in order to prevent an error
+        * during kexec.
+        * Make this power-specific because it breaks
+        * some non-power boards.
+        */
        radeon_suspend_kms(ddev, true, true, false);
+#endif
 }
 
 static int radeon_pmops_suspend(struct device *dev)
index ae07290bba6a4bd81eb70e8353e5b2eb0993eecc..04efa78d70b6ea50bb8e354ea3697fc99d3562e6 100644 (file)
@@ -147,7 +147,7 @@ static int rcar_du_wb_enc_atomic_check(struct drm_encoder *encoder,
        struct drm_device *dev = encoder->dev;
        struct drm_framebuffer *fb;
 
-       if (!conn_state->writeback_job || !conn_state->writeback_job->fb)
+       if (!conn_state->writeback_job)
                return 0;
 
        fb = conn_state->writeback_job->fb;
@@ -221,7 +221,7 @@ void rcar_du_writeback_setup(struct rcar_du_crtc *rcrtc,
        unsigned int i;
 
        state = rcrtc->writeback.base.state;
-       if (!state || !state->writeback_job || !state->writeback_job->fb)
+       if (!state || !state->writeback_job)
                return;
 
        fb = state->writeback_job->fb;
index 9a0ee74d82dc2b6c39859781c44e4e4be615cd1e..f39b97ed4ade4db6730fb5a31029c16701fd1d7d 100644 (file)
@@ -479,6 +479,7 @@ void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched)
        struct drm_sched_job *s_job, *tmp;
        uint64_t guilty_context;
        bool found_guilty = false;
+       struct dma_fence *fence;
 
        list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
                struct drm_sched_fence *s_fence = s_job->s_fence;
@@ -492,7 +493,16 @@ void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched)
                        dma_fence_set_error(&s_fence->finished, -ECANCELED);
 
                dma_fence_put(s_job->s_fence->parent);
-               s_job->s_fence->parent = sched->ops->run_job(s_job);
+               fence = sched->ops->run_job(s_job);
+
+               if (IS_ERR_OR_NULL(fence)) {
+                       s_job->s_fence->parent = NULL;
+                       dma_fence_set_error(&s_fence->finished, PTR_ERR(fence));
+               } else {
+                       s_job->s_fence->parent = fence;
+               }
+
+
        }
 }
 EXPORT_SYMBOL(drm_sched_resubmit_jobs);
@@ -720,7 +730,7 @@ static int drm_sched_main(void *param)
                fence = sched->ops->run_job(sched_job);
                drm_sched_fence_scheduled(s_fence);
 
-               if (fence) {
+               if (!IS_ERR_OR_NULL(fence)) {
                        s_fence->parent = dma_fence_get(fence);
                        r = dma_fence_add_callback(fence, &sched_job->cb,
                                                   drm_sched_process_job);
@@ -730,8 +740,11 @@ static int drm_sched_main(void *param)
                                DRM_ERROR("fence add callback failed (%d)\n",
                                          r);
                        dma_fence_put(fence);
-               } else
+               } else {
+
+                       dma_fence_set_error(&s_fence->finished, PTR_ERR(fence));
                        drm_sched_process_job(NULL, &sched_job->cb);
+               }
 
                wake_up(&sched->job_scheduled);
        }
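
Both scheduler hunks above let run_job() return NULL or an ERR_PTR-encoded errno, treating either as "no parent fence" and propagating the error into the finished fence. A self-contained sketch of that pointer-encoded-error convention; the helpers below are simplified re-implementations for illustration, not the kernel's <linux/err.h>:

#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO 4095

static void *ERR_PTR(long error) { return (void *)error; }
static long PTR_ERR(const void *ptr) { return (long)ptr; }
static int IS_ERR_OR_NULL(const void *ptr)
{
	return !ptr || (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Stand-in for sched->ops->run_job(): may return a fence, NULL, or an error. */
static void *run_job(int fail)
{
	static int fence = 42;

	return fail ? ERR_PTR(-ENOMEM) : (void *)&fence;
}

int main(void)
{
	void *fence = run_job(1);

	if (IS_ERR_OR_NULL(fence))
		printf("no parent fence, error %ld\n", fence ? PTR_ERR(fence) : 0L);
	else
		printf("got fence %p\n", fence);
	return 0;
}
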
index 525dc1c0f1c148ed835bfc60b0292b59ea33ab80..530edb3b51cc0c224ec04f8c96f406ec013a47ae 100644 (file)
@@ -7,6 +7,7 @@
 #include <linux/gpio.h>
 #include <linux/mod_devicetable.h>
 #include <linux/of_gpio.h>
+#include <linux/pinctrl/consumer.h>
 #include <linux/platform_device.h>
 
 #include <drm/drm_atomic_helper.h>
index 504763423d463a69122e3fe92870c8bec8b63aad..a46ac284dd5e9211070dcf1ecea877bdde90a760 100644 (file)
@@ -63,7 +63,6 @@ config TINYDRM_REPAPER
        depends on DRM && SPI
        select DRM_KMS_HELPER
        select DRM_KMS_CMA_HELPER
-       depends on THERMAL || !THERMAL
        help
          DRM driver for the following Pervasive Displays panels:
          1.44" TFT EPD Panel (E1144CS021)
index 20ff56f27aa4a988471ce390fd4149a6ec54343d..98819462f025fca646894d7245d136e75619d365 100644 (file)
@@ -185,8 +185,9 @@ static void ttm_bo_add_mem_to_lru(struct ttm_buffer_object *bo,
        list_add_tail(&bo->lru, &man->lru[bo->priority]);
        kref_get(&bo->list_kref);
 
-       if (bo->ttm && !(bo->ttm->page_flags &
-                        (TTM_PAGE_FLAG_SG | TTM_PAGE_FLAG_SWAPPED))) {
+       if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm &&
+           !(bo->ttm->page_flags & (TTM_PAGE_FLAG_SG |
+                                    TTM_PAGE_FLAG_SWAPPED))) {
                list_add_tail(&bo->swap, &bdev->glob->swap_lru[bo->priority]);
                kref_get(&bo->list_kref);
        }
@@ -878,11 +879,11 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
 
        if (!bo) {
                if (busy_bo)
-                       ttm_bo_get(busy_bo);
+                       kref_get(&busy_bo->list_kref);
                spin_unlock(&glob->lru_lock);
                ret = ttm_mem_evict_wait_busy(busy_bo, ctx, ticket);
                if (busy_bo)
-                       ttm_bo_put(busy_bo);
+                       kref_put(&busy_bo->list_kref, ttm_bo_release_list);
                return ret;
        }
 
index 76eedb9636931da799fe0954b6ef564ed7219621..46dc3de7e81bfba47037eea2a2dfbdd10e2b1313 100644 (file)
@@ -278,15 +278,13 @@ static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
                else
                        ret = vmf_insert_pfn(&cvma, address, pfn);
 
-               /*
-                * Somebody beat us to this PTE or prefaulting to
-                * an already populated PTE, or prefaulting error.
-                */
-
-               if (unlikely((ret == VM_FAULT_NOPAGE && i > 0)))
-                       break;
-               else if (unlikely(ret & VM_FAULT_ERROR))
-                       goto out_io_unlock;
+               /* Never error on prefaulted PTEs */
+               if (unlikely((ret & VM_FAULT_ERROR))) {
+                       if (i == 0)
+                               goto out_io_unlock;
+                       else
+                               break;
+               }
 
                address += PAGE_SIZE;
                if (unlikely(++page_offset >= page_last))
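
The rewritten loop above only fails the fault for the page that was actually requested (i == 0); an error while speculatively prefaulting later pages simply ends the prefault. A tiny standalone sketch of that policy, with a hypothetical insert_page() standing in for vmf_insert_pfn()/vmf_insert_mixed():

#include <stdio.h>

/* Pretend page insertion fails for every page after the second one. */
static int insert_page(int i)
{
	return i < 2 ? 0 : -1;
}

static int fault_handler(int prefault_pages)
{
	for (int i = 0; i < prefault_pages; i++) {
		if (insert_page(i)) {
			if (i == 0)
				return -1;	/* the real fault failed: report it */
			break;			/* speculative prefault: just stop */
		}
	}
	return 0;
}

int main(void)
{
	printf("%d\n", fault_handler(8));	/* prints 0: prefault errors are ignored */
	return 0;
}
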
index 5d80507b539bd01a28619629c7dd6cf9d4ddf42c..19c092d75266b451c05f5e095ef6b8150782a68a 100644 (file)
@@ -557,13 +557,16 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
 
        if (args->bcl_start != args->bcl_end) {
                bin = kcalloc(1, sizeof(*bin), GFP_KERNEL);
-               if (!bin)
+               if (!bin) {
+                       v3d_job_put(&render->base);
                        return -ENOMEM;
+               }
 
                ret = v3d_job_init(v3d, file_priv, &bin->base,
                                   v3d_job_free, args->in_sync_bcl);
                if (ret) {
                        v3d_job_put(&render->base);
+                       kfree(bin);
                        return ret;
                }
 
index 1ce4d7142b6e9f1bae20b4dcc20c14692fdf61b6..bf720206727f08d3b72454133dd4e1f8d51ad909 100644 (file)
@@ -231,7 +231,7 @@ static int vc4_txp_connector_atomic_check(struct drm_connector *conn,
        int i;
 
        conn_state = drm_atomic_get_new_connector_state(state, conn);
-       if (!conn_state->writeback_job || !conn_state->writeback_job->fb)
+       if (!conn_state->writeback_job)
                return 0;
 
        crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc);
@@ -271,8 +271,7 @@ static void vc4_txp_connector_atomic_commit(struct drm_connector *conn,
        u32 ctrl;
        int i;
 
-       if (WARN_ON(!conn_state->writeback_job ||
-                   !conn_state->writeback_job->fb))
+       if (WARN_ON(!conn_state->writeback_job))
                return;
 
        mode = &conn_state->crtc->state->adjusted_mode;
index ba1828acd8c9999029db8ddc7a1cde48cd0b312a..4be49c1aef5189bfa02d9cd9299f2cf1f4c82450 100644 (file)
@@ -718,17 +718,9 @@ static int xen_drv_probe(struct xenbus_device *xb_dev,
        struct device *dev = &xb_dev->dev;
        int ret;
 
-       /*
-        * The device is not spawn from a device tree, so arch_setup_dma_ops
-        * is not called, thus leaving the device with dummy DMA ops.
-        * This makes the device return error on PRIME buffer import, which
-        * is not correct: to fix this call of_dma_configure() with a NULL
-        * node to set default DMA ops.
-        */
-       dev->coherent_dma_mask = DMA_BIT_MASK(32);
-       ret = of_dma_configure(dev, NULL, true);
+       ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
        if (ret < 0) {
-               DRM_ERROR("Cannot setup DMA ops, ret %d", ret);
+               DRM_ERROR("Cannot setup DMA mask, ret %d", ret);
                return ret;
        }
 
index 6654c1550e2e7af9e73b439ae6b7cc45ee31cc88..fbe4e16ab0297f778402eca7d9e44d2621bd1154 100644 (file)
@@ -63,13 +63,20 @@ static int axff_init(struct hid_device *hid)
 {
        struct axff_device *axff;
        struct hid_report *report;
-       struct hid_input *hidinput = list_first_entry(&hid->inputs, struct hid_input, list);
+       struct hid_input *hidinput;
        struct list_head *report_list =&hid->report_enum[HID_OUTPUT_REPORT].report_list;
-       struct input_dev *dev = hidinput->input;
+       struct input_dev *dev;
        int field_count = 0;
        int i, j;
        int error;
 
+       if (list_empty(&hid->inputs)) {
+               hid_err(hid, "no inputs found\n");
+               return -ENODEV;
+       }
+       hidinput = list_first_entry(&hid->inputs, struct hid_input, list);
+       dev = hidinput->input;
+
        if (list_empty(report_list)) {
                hid_err(hid, "no output reports found\n");
                return -ENODEV;
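
The same guard is repeated in the hid-dr, hid-emsff, hid-gaff, hid-holtekff, hid-lg*ff, hid-logitech-hidpp, hid-microsoft, hid-sony, hid-tmff and hid-zpff hunks below: never take the first entry of hid->inputs without checking that the list is non-empty, since a device can bind without creating any input. A minimal standalone sketch of the pattern, using a toy circular list instead of <linux/list.h>:

#include <stdio.h>
#include <stddef.h>

struct node { struct node *next, *prev; };

static void list_init(struct node *head) { head->next = head->prev = head; }
static int list_empty(const struct node *head) { return head->next == head; }

struct hid_input { struct node list; int id; };	/* toy stand-in */

static int ff_init(struct node *inputs)
{
	struct hid_input *first;

	if (list_empty(inputs)) {
		fprintf(stderr, "no inputs found\n");
		return -1;		/* bail out instead of dereferencing junk */
	}
	first = (struct hid_input *)((char *)inputs->next -
				     offsetof(struct hid_input, list));
	printf("first input id %d\n", first->id);
	return 0;
}

int main(void)
{
	struct node inputs;

	list_init(&inputs);		/* empty list: ff_init refuses cleanly */
	return ff_init(&inputs) ? 1 : 0;
}
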
index 3eaee2c37931bad846decfa127bb905c134770e9..63fdbf09b044f3c8b0211d7e46c25ffa39776eb0 100644 (file)
@@ -1139,6 +1139,7 @@ int hid_open_report(struct hid_device *device)
        __u8 *start;
        __u8 *buf;
        __u8 *end;
+       __u8 *next;
        int ret;
        static int (*dispatch_type[])(struct hid_parser *parser,
                                      struct hid_item *item) = {
@@ -1192,7 +1193,8 @@ int hid_open_report(struct hid_device *device)
        device->collection_size = HID_DEFAULT_NUM_COLLECTIONS;
 
        ret = -EINVAL;
-       while ((start = fetch_item(start, end, &item)) != NULL) {
+       while ((next = fetch_item(start, end, &item)) != NULL) {
+               start = next;
 
                if (item.format != HID_ITEM_FORMAT_SHORT) {
                        hid_err(device, "unexpected long global item\n");
@@ -1230,7 +1232,8 @@ int hid_open_report(struct hid_device *device)
                }
        }
 
-       hid_err(device, "item fetching failed at offset %d\n", (int)(end - start));
+       hid_err(device, "item fetching failed at offset %u/%u\n",
+               size - (unsigned int)(end - start), size);
 err:
        kfree(parser->collection_stack);
 alloc_err:
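
The loop change above keeps 'start' pointing at the item that failed to parse (the cursor advances through 'next'), so the error message can report a meaningful offset from the beginning of the report, size - (end - start). A small standalone sketch of that cursor-preserving parse loop with a toy fetch_item():

#include <stdio.h>
#include <stddef.h>

/* Toy parser: consumes one byte per item, fails on a 0xff byte. */
static const unsigned char *fetch_item(const unsigned char *start,
				       const unsigned char *end)
{
	if (start >= end || *start == 0xff)
		return NULL;
	return start + 1;
}

int main(void)
{
	unsigned char report[] = { 1, 2, 3, 0xff, 5 };
	size_t size = sizeof(report);
	const unsigned char *start = report, *end = report + size, *next;

	/* Keep 'start' at the item being parsed so a failure can be reported
	 * as an offset from the beginning of the buffer. */
	while ((next = fetch_item(start, end)) != NULL)
		start = next;

	printf("parsing stopped at offset %zu/%zu\n",
	       size - (size_t)(end - start), size);	/* prints 3/5 */
	return 0;
}
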
index 17e17f9a597b5d9edc35b6f85c5b4b3db13e8727..947f19f8685f8668ea6e5568d3dad821156c3fa3 100644 (file)
@@ -75,13 +75,19 @@ static int drff_init(struct hid_device *hid)
 {
        struct drff_device *drff;
        struct hid_report *report;
-       struct hid_input *hidinput = list_first_entry(&hid->inputs,
-                                               struct hid_input, list);
+       struct hid_input *hidinput;
        struct list_head *report_list =
                        &hid->report_enum[HID_OUTPUT_REPORT].report_list;
-       struct input_dev *dev = hidinput->input;
+       struct input_dev *dev;
        int error;
 
+       if (list_empty(&hid->inputs)) {
+               hid_err(hid, "no inputs found\n");
+               return -ENODEV;
+       }
+       hidinput = list_first_entry(&hid->inputs, struct hid_input, list);
+       dev = hidinput->input;
+
        if (list_empty(report_list)) {
                hid_err(hid, "no output reports found\n");
                return -ENODEV;
index 7cd5651872d3bd8b8c20455608914322a86c7185..c34f2e5a049f4dd314c72435d41398b2b87b0431 100644 (file)
@@ -47,13 +47,19 @@ static int emsff_init(struct hid_device *hid)
 {
        struct emsff_device *emsff;
        struct hid_report *report;
-       struct hid_input *hidinput = list_first_entry(&hid->inputs,
-                                               struct hid_input, list);
+       struct hid_input *hidinput;
        struct list_head *report_list =
                        &hid->report_enum[HID_OUTPUT_REPORT].report_list;
-       struct input_dev *dev = hidinput->input;
+       struct input_dev *dev;
        int error;
 
+       if (list_empty(&hid->inputs)) {
+               hid_err(hid, "no inputs found\n");
+               return -ENODEV;
+       }
+       hidinput = list_first_entry(&hid->inputs, struct hid_input, list);
+       dev = hidinput->input;
+
        if (list_empty(report_list)) {
                hid_err(hid, "no output reports found\n");
                return -ENODEV;
index 0f95c96b70f810114f770d129dd3dd66a12185c6..ecbd3995a4eb1940c689c23ccbda5437f17d237d 100644 (file)
@@ -64,14 +64,20 @@ static int gaff_init(struct hid_device *hid)
 {
        struct gaff_device *gaff;
        struct hid_report *report;
-       struct hid_input *hidinput = list_entry(hid->inputs.next,
-                                               struct hid_input, list);
+       struct hid_input *hidinput;
        struct list_head *report_list =
                        &hid->report_enum[HID_OUTPUT_REPORT].report_list;
        struct list_head *report_ptr = report_list;
-       struct input_dev *dev = hidinput->input;
+       struct input_dev *dev;
        int error;
 
+       if (list_empty(&hid->inputs)) {
+               hid_err(hid, "no inputs found\n");
+               return -ENODEV;
+       }
+       hidinput = list_entry(hid->inputs.next, struct hid_input, list);
+       dev = hidinput->input;
+
        if (list_empty(report_list)) {
                hid_err(hid, "no output reports found\n");
                return -ENODEV;
index 84f8c127ebdc745b61723b7ee2dffb532ce599a2..d86a9189e88fd0b56fc9b088313765a151c12f40 100644 (file)
@@ -469,6 +469,10 @@ static int hammer_probe(struct hid_device *hdev,
 static const struct hid_device_id hammer_devices[] = {
        { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
                     USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_HAMMER) },
+       { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
+                    USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_MAGNEMITE) },
+       { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
+                    USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_MASTERBALL) },
        { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
                     USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_STAFF) },
        { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
index 10a7205588303d5180ef0b4fdecdba93d7d6b2f5..8619b80c834cc0b3c609eed7528b79f60f871ba2 100644 (file)
@@ -124,13 +124,19 @@ static int holtekff_init(struct hid_device *hid)
 {
        struct holtekff_device *holtekff;
        struct hid_report *report;
-       struct hid_input *hidinput = list_entry(hid->inputs.next,
-                                               struct hid_input, list);
+       struct hid_input *hidinput;
        struct list_head *report_list =
                        &hid->report_enum[HID_OUTPUT_REPORT].report_list;
-       struct input_dev *dev = hidinput->input;
+       struct input_dev *dev;
        int error;
 
+       if (list_empty(&hid->inputs)) {
+               hid_err(hid, "no inputs found\n");
+               return -ENODEV;
+       }
+       hidinput = list_entry(hid->inputs.next, struct hid_input, list);
+       dev = hidinput->input;
+
        if (list_empty(report_list)) {
                hid_err(hid, "no output report found\n");
                return -ENODEV;
index cc5b09b87ab0dc70b844a40cd9bffe070e078c84..79a28fc9152162c695a05eb55fe989c48f74612d 100644 (file)
@@ -314,60 +314,24 @@ static void mousevsc_on_receive(struct hv_device *device,
 
 static void mousevsc_on_channel_callback(void *context)
 {
-       const int packet_size = 0x100;
-       int ret;
        struct hv_device *device = context;
-       u32 bytes_recvd;
-       u64 req_id;
        struct vmpacket_descriptor *desc;
-       unsigned char   *buffer;
-       int     bufferlen = packet_size;
-
-       buffer = kmalloc(bufferlen, GFP_ATOMIC);
-       if (!buffer)
-               return;
-
-       do {
-               ret = vmbus_recvpacket_raw(device->channel, buffer,
-                                       bufferlen, &bytes_recvd, &req_id);
-
-               switch (ret) {
-               case 0:
-                       if (bytes_recvd <= 0) {
-                               kfree(buffer);
-                               return;
-                       }
-                       desc = (struct vmpacket_descriptor *)buffer;
-
-                       switch (desc->type) {
-                       case VM_PKT_COMP:
-                               break;
-
-                       case VM_PKT_DATA_INBAND:
-                               mousevsc_on_receive(device, desc);
-                               break;
-
-                       default:
-                               pr_err("unhandled packet type %d, tid %llx len %d\n",
-                                       desc->type, req_id, bytes_recvd);
-                               break;
-                       }
 
+       foreach_vmbus_pkt(desc, device->channel) {
+               switch (desc->type) {
+               case VM_PKT_COMP:
                        break;
 
-               case -ENOBUFS:
-                       kfree(buffer);
-                       /* Handle large packet */
-                       bufferlen = bytes_recvd;
-                       buffer = kmalloc(bytes_recvd, GFP_ATOMIC);
-
-                       if (!buffer)
-                               return;
+               case VM_PKT_DATA_INBAND:
+                       mousevsc_on_receive(device, desc);
+                       break;
 
+               default:
+                       pr_err("Unhandled packet type %d, tid %llx len %d\n",
+                              desc->type, desc->trans_id, desc->len8 * 8);
                        break;
                }
-       } while (1);
-
+       }
 }
 
 static int mousevsc_connect_to_vsp(struct hv_device *device)
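
The hunk above replaces a hand-rolled vmbus_recvpacket_raw() loop, with its retry-on-ENOBUFS buffer management, by the foreach_vmbus_pkt() iterator that walks packets in place. A standalone sketch of the iterator-macro idea; foreach_pkt and struct pkt here are illustrative, not the vmbus API:

#include <stdio.h>

struct pkt { int type; int len; };

/* Walk an array of packets in place, no temporary buffer needed. */
#define foreach_pkt(p, ring, count) \
	for ((p) = (ring); (p) < (ring) + (count); (p)++)

int main(void)
{
	struct pkt ring[] = { { .type = 1, .len = 8 }, { .type = 2, .len = 16 } };
	struct pkt *p;

	foreach_pkt(p, ring, 2)
		printf("packet type %d, len %d\n", p->type, p->len);
	return 0;
}
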
index 76969a22b0f2f796a2b424ed252a8c0d08c3e3bd..447e8db21174ae78d35e372868d9234a31318963 100644 (file)
 #define USB_DEVICE_ID_GOOGLE_STAFF     0x502b
 #define USB_DEVICE_ID_GOOGLE_WAND      0x502d
 #define USB_DEVICE_ID_GOOGLE_WHISKERS  0x5030
+#define USB_DEVICE_ID_GOOGLE_MASTERBALL        0x503c
+#define USB_DEVICE_ID_GOOGLE_MAGNEMITE 0x503d
 
 #define USB_VENDOR_ID_GOTOP            0x08f2
 #define USB_DEVICE_ID_SUPER_Q2         0x007f
index dd1a6c3a7de67464cec36e48fd89fc9e9a66d02c..73d07e35f12aa73dd58714c1792e7e00677b6f2b 100644 (file)
@@ -50,11 +50,17 @@ int lg2ff_init(struct hid_device *hid)
 {
        struct lg2ff_device *lg2ff;
        struct hid_report *report;
-       struct hid_input *hidinput = list_entry(hid->inputs.next,
-                                               struct hid_input, list);
-       struct input_dev *dev = hidinput->input;
+       struct hid_input *hidinput;
+       struct input_dev *dev;
        int error;
 
+       if (list_empty(&hid->inputs)) {
+               hid_err(hid, "no inputs found\n");
+               return -ENODEV;
+       }
+       hidinput = list_entry(hid->inputs.next, struct hid_input, list);
+       dev = hidinput->input;
+
        /* Check that the report looks ok */
        report = hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 7);
        if (!report)
index 9ecb6fd062030eb3ea21aab9d99a81c2172ed4d4..b7e1949f3cf779de12accc337981406dd1910f26 100644 (file)
@@ -117,12 +117,19 @@ static const signed short ff3_joystick_ac[] = {
 
 int lg3ff_init(struct hid_device *hid)
 {
-       struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list);
-       struct input_dev *dev = hidinput->input;
+       struct hid_input *hidinput;
+       struct input_dev *dev;
        const signed short *ff_bits = ff3_joystick_ac;
        int error;
        int i;
 
+       if (list_empty(&hid->inputs)) {
+               hid_err(hid, "no inputs found\n");
+               return -ENODEV;
+       }
+       hidinput = list_entry(hid->inputs.next, struct hid_input, list);
+       dev = hidinput->input;
+
        /* Check that the report looks ok */
        if (!hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 35))
                return -ENODEV;
index 03f0220062cab641a725eddc2e4d51c1bedca141..5e6a0cef2a06d061035520a28e24cc4488859498 100644 (file)
@@ -1253,8 +1253,8 @@ static int lg4ff_handle_multimode_wheel(struct hid_device *hid, u16 *real_produc
 
 int lg4ff_init(struct hid_device *hid)
 {
-       struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list);
-       struct input_dev *dev = hidinput->input;
+       struct hid_input *hidinput;
+       struct input_dev *dev;
        struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
        struct hid_report *report = list_entry(report_list->next, struct hid_report, list);
        const struct usb_device_descriptor *udesc = &(hid_to_usb_dev(hid)->descriptor);
@@ -1266,6 +1266,13 @@ int lg4ff_init(struct hid_device *hid)
        int mmode_ret, mmode_idx = -1;
        u16 real_product_id;
 
+       if (list_empty(&hid->inputs)) {
+               hid_err(hid, "no inputs found\n");
+               return -ENODEV;
+       }
+       hidinput = list_entry(hid->inputs.next, struct hid_input, list);
+       dev = hidinput->input;
+
        /* Check that the report looks ok */
        if (!hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 7))
                return -1;
index c79a6ec437450029f4490ed4e4dfe9ac1f280d2e..aed4ddc397a963154a041487e8d3c8c455b4cbdd 100644 (file)
@@ -115,12 +115,19 @@ static void hid_lgff_set_autocenter(struct input_dev *dev, u16 magnitude)
 
 int lgff_init(struct hid_device* hid)
 {
-       struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list);
-       struct input_dev *dev = hidinput->input;
+       struct hid_input *hidinput;
+       struct input_dev *dev;
        const signed short *ff_bits = ff_joystick;
        int error;
        int i;
 
+       if (list_empty(&hid->inputs)) {
+               hid_err(hid, "no inputs found\n");
+               return -ENODEV;
+       }
+       hidinput = list_entry(hid->inputs.next, struct hid_input, list);
+       dev = hidinput->input;
+
        /* Check that the report looks ok */
        if (!hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 7))
                return -ENODEV;
index 0179f7ed77e5b13f852f4c4fb3fa9619f1c15677..8e91e2f06cb4fc8574e08ca3c8ddd2b4595767e3 100644 (file)
@@ -1669,6 +1669,7 @@ static void hidpp_touchpad_raw_xy_event(struct hidpp_device *hidpp_dev,
 
 #define HIDPP_FF_EFFECTID_NONE         -1
 #define HIDPP_FF_EFFECTID_AUTOCENTER   -2
+#define HIDPP_AUTOCENTER_PARAMS_LENGTH 18
 
 #define HIDPP_FF_MAX_PARAMS    20
 #define HIDPP_FF_RESERVED_SLOTS        1
@@ -2009,7 +2010,7 @@ static int hidpp_ff_erase_effect(struct input_dev *dev, int effect_id)
 static void hidpp_ff_set_autocenter(struct input_dev *dev, u16 magnitude)
 {
        struct hidpp_ff_private_data *data = dev->ff->private;
-       u8 params[18];
+       u8 params[HIDPP_AUTOCENTER_PARAMS_LENGTH];
 
        dbg_hid("Setting autocenter to %d.\n", magnitude);
 
@@ -2077,23 +2078,34 @@ static DEVICE_ATTR(range, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH, hidpp
 static void hidpp_ff_destroy(struct ff_device *ff)
 {
        struct hidpp_ff_private_data *data = ff->private;
+       struct hid_device *hid = data->hidpp->hid_dev;
 
+       hid_info(hid, "Unloading HID++ force feedback.\n");
+
+       device_remove_file(&hid->dev, &dev_attr_range);
+       destroy_workqueue(data->wq);
        kfree(data->effect_ids);
 }
 
-static int hidpp_ff_init(struct hidpp_device *hidpp, u8 feature_index)
+static int hidpp_ff_init(struct hidpp_device *hidpp,
+                        struct hidpp_ff_private_data *data)
 {
        struct hid_device *hid = hidpp->hid_dev;
-       struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list);
-       struct input_dev *dev = hidinput->input;
+       struct hid_input *hidinput;
+       struct input_dev *dev;
        const struct usb_device_descriptor *udesc = &(hid_to_usb_dev(hid)->descriptor);
        const u16 bcdDevice = le16_to_cpu(udesc->bcdDevice);
        struct ff_device *ff;
-       struct hidpp_report response;
-       struct hidpp_ff_private_data *data;
-       int error, j, num_slots;
+       int error, j, num_slots = data->num_effects;
        u8 version;
 
+       if (list_empty(&hid->inputs)) {
+               hid_err(hid, "no inputs found\n");
+               return -ENODEV;
+       }
+       hidinput = list_entry(hid->inputs.next, struct hid_input, list);
+       dev = hidinput->input;
+
        if (!dev) {
                hid_err(hid, "Struct input_dev not set!\n");
                return -EINVAL;
@@ -2109,27 +2121,17 @@ static int hidpp_ff_init(struct hidpp_device *hidpp, u8 feature_index)
                for (j = 0; hidpp_ff_effects_v2[j] >= 0; j++)
                        set_bit(hidpp_ff_effects_v2[j], dev->ffbit);
 
-       /* Read number of slots available in device */
-       error = hidpp_send_fap_command_sync(hidpp, feature_index,
-               HIDPP_FF_GET_INFO, NULL, 0, &response);
-       if (error) {
-               if (error < 0)
-                       return error;
-               hid_err(hidpp->hid_dev, "%s: received protocol error 0x%02x\n",
-                       __func__, error);
-               return -EPROTO;
-       }
-
-       num_slots = response.fap.params[0] - HIDPP_FF_RESERVED_SLOTS;
-
        error = input_ff_create(dev, num_slots);
 
        if (error) {
                hid_err(dev, "Failed to create FF device!\n");
                return error;
        }
-
-       data = kzalloc(sizeof(*data), GFP_KERNEL);
+       /*
+        * Create a copy of passed data, so we can transfer memory
+        * ownership to FF core
+        */
+       data = kmemdup(data, sizeof(*data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;
        data->effect_ids = kcalloc(num_slots, sizeof(int), GFP_KERNEL);
@@ -2145,10 +2147,7 @@ static int hidpp_ff_init(struct hidpp_device *hidpp, u8 feature_index)
        }
 
        data->hidpp = hidpp;
-       data->feature_index = feature_index;
        data->version = version;
-       data->slot_autocenter = 0;
-       data->num_effects = num_slots;
        for (j = 0; j < num_slots; j++)
                data->effect_ids[j] = -1;
 
@@ -2162,68 +2161,20 @@ static int hidpp_ff_init(struct hidpp_device *hidpp, u8 feature_index)
        ff->set_autocenter = hidpp_ff_set_autocenter;
        ff->destroy = hidpp_ff_destroy;
 
-
-       /* reset all forces */
-       error = hidpp_send_fap_command_sync(hidpp, feature_index,
-               HIDPP_FF_RESET_ALL, NULL, 0, &response);
-
-       /* Read current Range */
-       error = hidpp_send_fap_command_sync(hidpp, feature_index,
-               HIDPP_FF_GET_APERTURE, NULL, 0, &response);
-       if (error)
-               hid_warn(hidpp->hid_dev, "Failed to read range from device!\n");
-       data->range = error ? 900 : get_unaligned_be16(&response.fap.params[0]);
-
        /* Create sysfs interface */
        error = device_create_file(&(hidpp->hid_dev->dev), &dev_attr_range);
        if (error)
                hid_warn(hidpp->hid_dev, "Unable to create sysfs interface for \"range\", errno %d!\n", error);
 
-       /* Read the current gain values */
-       error = hidpp_send_fap_command_sync(hidpp, feature_index,
-               HIDPP_FF_GET_GLOBAL_GAINS, NULL, 0, &response);
-       if (error)
-               hid_warn(hidpp->hid_dev, "Failed to read gain values from device!\n");
-       data->gain = error ? 0xffff : get_unaligned_be16(&response.fap.params[0]);
-       /* ignore boost value at response.fap.params[2] */
-
        /* init the hardware command queue */
        atomic_set(&data->workqueue_size, 0);
 
-       /* initialize with zero autocenter to get wheel in usable state */
-       hidpp_ff_set_autocenter(dev, 0);
-
        hid_info(hid, "Force feedback support loaded (firmware release %d).\n",
                 version);
 
        return 0;
 }
 
-static int hidpp_ff_deinit(struct hid_device *hid)
-{
-       struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list);
-       struct input_dev *dev = hidinput->input;
-       struct hidpp_ff_private_data *data;
-
-       if (!dev) {
-               hid_err(hid, "Struct input_dev not found!\n");
-               return -EINVAL;
-       }
-
-       hid_info(hid, "Unloading HID++ force feedback.\n");
-       data = dev->ff->private;
-       if (!data) {
-               hid_err(hid, "Private data not found!\n");
-               return -EINVAL;
-       }
-
-       destroy_workqueue(data->wq);
-       device_remove_file(&hid->dev, &dev_attr_range);
-
-       return 0;
-}
-
-
 /* ************************************************************************** */
 /*                                                                            */
 /* Device Support                                                             */
@@ -2725,24 +2676,93 @@ static int k400_connect(struct hid_device *hdev, bool connected)
 
 #define HIDPP_PAGE_G920_FORCE_FEEDBACK                 0x8123
 
-static int g920_get_config(struct hidpp_device *hidpp)
+static int g920_ff_set_autocenter(struct hidpp_device *hidpp,
+                                 struct hidpp_ff_private_data *data)
 {
+       struct hidpp_report response;
+       u8 params[HIDPP_AUTOCENTER_PARAMS_LENGTH] = {
+               [1] = HIDPP_FF_EFFECT_SPRING | HIDPP_FF_EFFECT_AUTOSTART,
+       };
+       int ret;
+
+       /* initialize with zero autocenter to get wheel in usable state */
+
+       dbg_hid("Setting autocenter to 0.\n");
+       ret = hidpp_send_fap_command_sync(hidpp, data->feature_index,
+                                         HIDPP_FF_DOWNLOAD_EFFECT,
+                                         params, ARRAY_SIZE(params),
+                                         &response);
+       if (ret)
+               hid_warn(hidpp->hid_dev, "Failed to autocenter device!\n");
+       else
+               data->slot_autocenter = response.fap.params[0];
+
+       return ret;
+}
+
+static int g920_get_config(struct hidpp_device *hidpp,
+                          struct hidpp_ff_private_data *data)
+{
+       struct hidpp_report response;
        u8 feature_type;
-       u8 feature_index;
        int ret;
 
+       memset(data, 0, sizeof(*data));
+
        /* Find feature and store for later use */
        ret = hidpp_root_get_feature(hidpp, HIDPP_PAGE_G920_FORCE_FEEDBACK,
-               &feature_index, &feature_type);
+                                    &data->feature_index, &feature_type);
        if (ret)
                return ret;
 
-       ret = hidpp_ff_init(hidpp, feature_index);
+       /* Read number of slots available in device */
+       ret = hidpp_send_fap_command_sync(hidpp, data->feature_index,
+                                         HIDPP_FF_GET_INFO,
+                                         NULL, 0,
+                                         &response);
+       if (ret) {
+               if (ret < 0)
+                       return ret;
+               hid_err(hidpp->hid_dev,
+                       "%s: received protocol error 0x%02x\n", __func__, ret);
+               return -EPROTO;
+       }
+
+       data->num_effects = response.fap.params[0] - HIDPP_FF_RESERVED_SLOTS;
+
+       /* reset all forces */
+       ret = hidpp_send_fap_command_sync(hidpp, data->feature_index,
+                                         HIDPP_FF_RESET_ALL,
+                                         NULL, 0,
+                                         &response);
        if (ret)
-               hid_warn(hidpp->hid_dev, "Unable to initialize force feedback support, errno %d\n",
-                               ret);
+               hid_warn(hidpp->hid_dev, "Failed to reset all forces!\n");
 
-       return 0;
+       ret = hidpp_send_fap_command_sync(hidpp, data->feature_index,
+                                         HIDPP_FF_GET_APERTURE,
+                                         NULL, 0,
+                                         &response);
+       if (ret) {
+               hid_warn(hidpp->hid_dev,
+                        "Failed to read range from device!\n");
+       }
+       data->range = ret ?
+               900 : get_unaligned_be16(&response.fap.params[0]);
+
+       /* Read the current gain values */
+       ret = hidpp_send_fap_command_sync(hidpp, data->feature_index,
+                                         HIDPP_FF_GET_GLOBAL_GAINS,
+                                         NULL, 0,
+                                         &response);
+       if (ret)
+               hid_warn(hidpp->hid_dev,
+                        "Failed to read gain values from device!\n");
+       data->gain = ret ?
+               0xffff : get_unaligned_be16(&response.fap.params[0]);
+
+       /* ignore boost value at response.fap.params[2] */
+
+       return g920_ff_set_autocenter(hidpp, data);
 }
 
 /* -------------------------------------------------------------------------- */
@@ -3458,34 +3478,45 @@ static int hidpp_get_report_length(struct hid_device *hdev, int id)
        return report->field[0]->report_count + 1;
 }
 
-static bool hidpp_validate_report(struct hid_device *hdev, int id,
-                                 int expected_length, bool optional)
+static bool hidpp_validate_device(struct hid_device *hdev)
 {
-       int report_length;
+       struct hidpp_device *hidpp = hid_get_drvdata(hdev);
+       int id, report_length, supported_reports = 0;
 
-       if (id >= HID_MAX_IDS || id < 0) {
-               hid_err(hdev, "invalid HID report id %u\n", id);
-               return false;
+       id = REPORT_ID_HIDPP_SHORT;
+       report_length = hidpp_get_report_length(hdev, id);
+       if (report_length) {
+               if (report_length < HIDPP_REPORT_SHORT_LENGTH)
+                       goto bad_device;
+
+               supported_reports++;
        }
 
+       id = REPORT_ID_HIDPP_LONG;
        report_length = hidpp_get_report_length(hdev, id);
-       if (!report_length)
-               return optional;
+       if (report_length) {
+               if (report_length < HIDPP_REPORT_LONG_LENGTH)
+                       goto bad_device;
 
-       if (report_length < expected_length) {
-               hid_warn(hdev, "not enough values in hidpp report %d\n", id);
-               return false;
+               supported_reports++;
        }
 
-       return true;
-}
+       id = REPORT_ID_HIDPP_VERY_LONG;
+       report_length = hidpp_get_report_length(hdev, id);
+       if (report_length) {
+               if (report_length < HIDPP_REPORT_LONG_LENGTH ||
+                   report_length > HIDPP_REPORT_VERY_LONG_MAX_LENGTH)
+                       goto bad_device;
 
-static bool hidpp_validate_device(struct hid_device *hdev)
-{
-       return hidpp_validate_report(hdev, REPORT_ID_HIDPP_SHORT,
-                                    HIDPP_REPORT_SHORT_LENGTH, false) &&
-              hidpp_validate_report(hdev, REPORT_ID_HIDPP_LONG,
-                                    HIDPP_REPORT_LONG_LENGTH, true);
+               supported_reports++;
+               hidpp->very_long_report_length = report_length;
+       }
+
+       return supported_reports;
+
+bad_device:
+       hid_warn(hdev, "not enough values in hidpp report %d\n", id);
+       return false;
 }
 
 static bool hidpp_application_equals(struct hid_device *hdev,
@@ -3505,6 +3536,7 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
        int ret;
        bool connected;
        unsigned int connect_mask = HID_CONNECT_DEFAULT;
+       struct hidpp_ff_private_data data;
 
        /* report_fixup needs drvdata to be set before we call hid_parse */
        hidpp = devm_kzalloc(&hdev->dev, sizeof(*hidpp), GFP_KERNEL);
@@ -3531,11 +3563,6 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
                return hid_hw_start(hdev, HID_CONNECT_DEFAULT);
        }
 
-       hidpp->very_long_report_length =
-               hidpp_get_report_length(hdev, REPORT_ID_HIDPP_VERY_LONG);
-       if (hidpp->very_long_report_length > HIDPP_REPORT_VERY_LONG_MAX_LENGTH)
-               hidpp->very_long_report_length = HIDPP_REPORT_VERY_LONG_MAX_LENGTH;
-
        if (id->group == HID_GROUP_LOGITECH_DJ_DEVICE)
                hidpp->quirks |= HIDPP_QUIRK_UNIFYING;
 
@@ -3614,7 +3641,7 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
                if (ret)
                        goto hid_hw_init_fail;
        } else if (connected && (hidpp->quirks & HIDPP_QUIRK_CLASS_G920)) {
-               ret = g920_get_config(hidpp);
+               ret = g920_get_config(hidpp, &data);
                if (ret)
                        goto hid_hw_init_fail;
        }
@@ -3636,6 +3663,14 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
                goto hid_hw_start_fail;
        }
 
+       if (hidpp->quirks & HIDPP_QUIRK_CLASS_G920) {
+               ret = hidpp_ff_init(hidpp, &data);
+               if (ret)
+                       hid_warn(hidpp->hid_dev,
+                    "Unable to initialize force feedback support, errno %d\n",
+                                ret);
+       }
+
        return ret;
 
 hid_hw_init_fail:
@@ -3658,9 +3693,6 @@ static void hidpp_remove(struct hid_device *hdev)
 
        sysfs_remove_group(&hdev->dev.kobj, &ps_attribute_group);
 
-       if (hidpp->quirks & HIDPP_QUIRK_CLASS_G920)
-               hidpp_ff_deinit(hdev);
-
        hid_hw_stop(hdev);
        cancel_work_sync(&hidpp->work);
        mutex_destroy(&hidpp->send_mutex);
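
The hidpp rework above moves all G920 configuration reads into g920_get_config(), which fills a caller-provided struct; hidpp_ff_init() then duplicates that struct with kmemdup() and hands the copy to the FF core, whose destroy callback releases it. A standalone sketch of that fill/duplicate/transfer-ownership pattern, using plain malloc/free instead of kmemdup/kfree:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct ff_data { int range; unsigned int gain; };

struct ff_device { struct ff_data *priv; };

/* Userspace stand-in for kmemdup(). */
static void *memdup(const void *src, size_t len)
{
	void *p = malloc(len);

	if (p)
		memcpy(p, src, len);
	return p;
}

static int ff_init(struct ff_device *ff, const struct ff_data *cfg)
{
	/* Own copy: the caller's stack copy may go away after probe. */
	ff->priv = memdup(cfg, sizeof(*cfg));
	return ff->priv ? 0 : -1;
}

static void ff_destroy(struct ff_device *ff)
{
	free(ff->priv);		/* the destroy callback releases the copy */
	ff->priv = NULL;
}

int main(void)
{
	struct ff_data cfg = { .range = 900, .gain = 0xffff };
	struct ff_device ff;

	if (ff_init(&ff, &cfg))
		return 1;
	printf("range=%d gain=%#x\n", ff.priv->range, ff.priv->gain);
	ff_destroy(&ff);
	return 0;
}
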
index 2cf83856f2e41c7e6c1456224988eaaa6a370e0f..2d8b589201a4e80c7d2dc7c4a6df35344840bd9a 100644 (file)
@@ -328,11 +328,17 @@ static int ms_play_effect(struct input_dev *dev, void *data,
 
 static int ms_init_ff(struct hid_device *hdev)
 {
-       struct hid_input *hidinput = list_entry(hdev->inputs.next,
-                                               struct hid_input, list);
-       struct input_dev *input_dev = hidinput->input;
+       struct hid_input *hidinput;
+       struct input_dev *input_dev;
        struct ms_data *ms = hid_get_drvdata(hdev);
 
+       if (list_empty(&hdev->inputs)) {
+               hid_err(hdev, "no inputs found\n");
+               return -ENODEV;
+       }
+       hidinput = list_entry(hdev->inputs.next, struct hid_input, list);
+       input_dev = hidinput->input;
+
        if (!(ms->quirks & MS_QUIRK_FF))
                return 0;
 
index 5a3b3d974d849c28503f6f6390312384d6e1868f..2666af02d5c1a11f1a32f098eecb61ddc458cf0a 100644 (file)
@@ -516,7 +516,7 @@ static void pcmidi_setup_extra_keys(
                MY PICTURES =>  KEY_WORDPROCESSOR
                MY MUSIC=>      KEY_SPREADSHEET
        */
-       unsigned int keys[] = {
+       static const unsigned int keys[] = {
                KEY_FN,
                KEY_MESSENGER, KEY_CALENDAR,
                KEY_ADDRESSBOOK, KEY_DOCUMENTS,
@@ -532,7 +532,7 @@ static void pcmidi_setup_extra_keys(
                0
        };
 
-       unsigned int *pkeys = &keys[0];
+       const unsigned int *pkeys = &keys[0];
        unsigned short i;
 
        if (pm->ifnum != 1)  /* only set up ONCE for interface 1 */
index 73c0f7a95e2dcea13d10b37176c9d7678074af1c..4c6ed6ef31f1e731be29d06239fe972cc2e4eb4b 100644 (file)
@@ -2254,9 +2254,15 @@ static int sony_play_effect(struct input_dev *dev, void *data,
 
 static int sony_init_ff(struct sony_sc *sc)
 {
-       struct hid_input *hidinput = list_entry(sc->hdev->inputs.next,
-                                               struct hid_input, list);
-       struct input_dev *input_dev = hidinput->input;
+       struct hid_input *hidinput;
+       struct input_dev *input_dev;
+
+       if (list_empty(&sc->hdev->inputs)) {
+               hid_err(sc->hdev, "no inputs found\n");
+               return -ENODEV;
+       }
+       hidinput = list_entry(sc->hdev->inputs.next, struct hid_input, list);
+       input_dev = hidinput->input;
 
        input_set_capability(input_dev, EV_FF, FF_RUMBLE);
        return input_ff_create_memless(input_dev, NULL, sony_play_effect);
index bdfc5ff3b2c5c9211291f71d8f0304e233f076bd..90acef3045369e15f72abd7c72580952f080c518 100644 (file)
@@ -124,12 +124,18 @@ static int tmff_init(struct hid_device *hid, const signed short *ff_bits)
        struct tmff_device *tmff;
        struct hid_report *report;
        struct list_head *report_list;
-       struct hid_input *hidinput = list_entry(hid->inputs.next,
-                                                       struct hid_input, list);
-       struct input_dev *input_dev = hidinput->input;
+       struct hid_input *hidinput;
+       struct input_dev *input_dev;
        int error;
        int i;
 
+       if (list_empty(&hid->inputs)) {
+               hid_err(hid, "no inputs found\n");
+               return -ENODEV;
+       }
+       hidinput = list_entry(hid->inputs.next, struct hid_input, list);
+       input_dev = hidinput->input;
+
        tmff = kzalloc(sizeof(struct tmff_device), GFP_KERNEL);
        if (!tmff)
                return -ENOMEM;
index f90959e940287912e7b64ceed1eb2424f484455d..3abaca045869b0943d179a1af61be564ebdba299 100644 (file)
@@ -54,11 +54,17 @@ static int zpff_init(struct hid_device *hid)
 {
        struct zpff_device *zpff;
        struct hid_report *report;
-       struct hid_input *hidinput = list_entry(hid->inputs.next,
-                                               struct hid_input, list);
-       struct input_dev *dev = hidinput->input;
+       struct hid_input *hidinput;
+       struct input_dev *dev;
        int i, error;
 
+       if (list_empty(&hid->inputs)) {
+               hid_err(hid, "no inputs found\n");
+               return -ENODEV;
+       }
+       hidinput = list_entry(hid->inputs.next, struct hid_input, list);
+       dev = hidinput->input;
+
        for (i = 0; i < 4; i++) {
                report = hid_validate_values(hid, HID_OUTPUT_REPORT, 0, i, 1);
                if (!report)
index 2a7c6e33bb1c4322b6851e42988f593e69ae527f..04c088131e044bc649bbe7b414bb892bd01fe2b0 100644 (file)
@@ -26,7 +26,6 @@
 #include <linux/delay.h>
 #include <linux/slab.h>
 #include <linux/pm.h>
-#include <linux/pm_runtime.h>
 #include <linux/device.h>
 #include <linux/wait.h>
 #include <linux/err.h>
@@ -48,8 +47,6 @@
 /* quirks to control the device */
 #define I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV       BIT(0)
 #define I2C_HID_QUIRK_NO_IRQ_AFTER_RESET       BIT(1)
-#define I2C_HID_QUIRK_NO_RUNTIME_PM            BIT(2)
-#define I2C_HID_QUIRK_DELAY_AFTER_SLEEP                BIT(3)
 #define I2C_HID_QUIRK_BOGUS_IRQ                        BIT(4)
 
 /* flags */
@@ -172,14 +169,7 @@ static const struct i2c_hid_quirks {
        { USB_VENDOR_ID_WEIDA, HID_ANY_ID,
                I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV },
        { I2C_VENDOR_ID_HANTICK, I2C_PRODUCT_ID_HANTICK_5288,
-               I2C_HID_QUIRK_NO_IRQ_AFTER_RESET |
-               I2C_HID_QUIRK_NO_RUNTIME_PM },
-       { I2C_VENDOR_ID_RAYDIUM, I2C_PRODUCT_ID_RAYDIUM_4B33,
-               I2C_HID_QUIRK_DELAY_AFTER_SLEEP },
-       { USB_VENDOR_ID_LG, I2C_DEVICE_ID_LG_8001,
-               I2C_HID_QUIRK_NO_RUNTIME_PM },
-       { I2C_VENDOR_ID_GOODIX, I2C_DEVICE_ID_GOODIX_01F0,
-               I2C_HID_QUIRK_NO_RUNTIME_PM },
+               I2C_HID_QUIRK_NO_IRQ_AFTER_RESET },
        { USB_VENDOR_ID_ELAN, HID_ANY_ID,
                 I2C_HID_QUIRK_BOGUS_IRQ },
        { 0, 0 }
@@ -397,7 +387,6 @@ static int i2c_hid_set_power(struct i2c_client *client, int power_state)
 {
        struct i2c_hid *ihid = i2c_get_clientdata(client);
        int ret;
-       unsigned long now, delay;
 
        i2c_hid_dbg(ihid, "%s\n", __func__);
 
@@ -415,22 +404,9 @@ static int i2c_hid_set_power(struct i2c_client *client, int power_state)
                        goto set_pwr_exit;
        }
 
-       if (ihid->quirks & I2C_HID_QUIRK_DELAY_AFTER_SLEEP &&
-           power_state == I2C_HID_PWR_ON) {
-               now = jiffies;
-               if (time_after(ihid->sleep_delay, now)) {
-                       delay = jiffies_to_usecs(ihid->sleep_delay - now);
-                       usleep_range(delay, delay + 1);
-               }
-       }
-
        ret = __i2c_hid_command(client, &hid_set_power_cmd, power_state,
                0, NULL, 0, NULL, 0);
 
-       if (ihid->quirks & I2C_HID_QUIRK_DELAY_AFTER_SLEEP &&
-           power_state == I2C_HID_PWR_SLEEP)
-               ihid->sleep_delay = jiffies + msecs_to_jiffies(20);
-
        if (ret)
                dev_err(&client->dev, "failed to change power setting.\n");
 
@@ -471,8 +447,12 @@ static int i2c_hid_hwreset(struct i2c_client *client)
        if (ret) {
                dev_err(&client->dev, "failed to reset device.\n");
                i2c_hid_set_power(client, I2C_HID_PWR_SLEEP);
+               goto out_unlock;
        }
 
+       /* At least some SIS devices need this after reset */
+       ret = i2c_hid_set_power(client, I2C_HID_PWR_ON);
+
 out_unlock:
        mutex_unlock(&ihid->reset_lock);
        return ret;
@@ -791,11 +771,6 @@ static int i2c_hid_open(struct hid_device *hid)
 {
        struct i2c_client *client = hid->driver_data;
        struct i2c_hid *ihid = i2c_get_clientdata(client);
-       int ret = 0;
-
-       ret = pm_runtime_get_sync(&client->dev);
-       if (ret < 0)
-               return ret;
 
        set_bit(I2C_HID_STARTED, &ihid->flags);
        return 0;
@@ -807,27 +782,6 @@ static void i2c_hid_close(struct hid_device *hid)
        struct i2c_hid *ihid = i2c_get_clientdata(client);
 
        clear_bit(I2C_HID_STARTED, &ihid->flags);
-
-       /* Save some power */
-       pm_runtime_put(&client->dev);
-}
-
-static int i2c_hid_power(struct hid_device *hid, int lvl)
-{
-       struct i2c_client *client = hid->driver_data;
-       struct i2c_hid *ihid = i2c_get_clientdata(client);
-
-       i2c_hid_dbg(ihid, "%s lvl:%d\n", __func__, lvl);
-
-       switch (lvl) {
-       case PM_HINT_FULLON:
-               pm_runtime_get_sync(&client->dev);
-               break;
-       case PM_HINT_NORMAL:
-               pm_runtime_put(&client->dev);
-               break;
-       }
-       return 0;
 }
 
 struct hid_ll_driver i2c_hid_ll_driver = {
@@ -836,7 +790,6 @@ struct hid_ll_driver i2c_hid_ll_driver = {
        .stop = i2c_hid_stop,
        .open = i2c_hid_open,
        .close = i2c_hid_close,
-       .power = i2c_hid_power,
        .output_report = i2c_hid_output_report,
        .raw_request = i2c_hid_raw_request,
 };
@@ -1104,9 +1057,6 @@ static int i2c_hid_probe(struct i2c_client *client,
 
        i2c_hid_acpi_fix_up_power(&client->dev);
 
-       pm_runtime_get_noresume(&client->dev);
-       pm_runtime_set_active(&client->dev);
-       pm_runtime_enable(&client->dev);
        device_enable_async_suspend(&client->dev);
 
        /* Make sure there is something at this address */
@@ -1114,16 +1064,16 @@ static int i2c_hid_probe(struct i2c_client *client,
        if (ret < 0) {
                dev_dbg(&client->dev, "nothing at this address: %d\n", ret);
                ret = -ENXIO;
-               goto err_pm;
+               goto err_regulator;
        }
 
        ret = i2c_hid_fetch_hid_descriptor(ihid);
        if (ret < 0)
-               goto err_pm;
+               goto err_regulator;
 
        ret = i2c_hid_init_irq(client);
        if (ret < 0)
-               goto err_pm;
+               goto err_regulator;
 
        hid = hid_allocate_device();
        if (IS_ERR(hid)) {
@@ -1154,9 +1104,6 @@ static int i2c_hid_probe(struct i2c_client *client,
                goto err_mem_free;
        }
 
-       if (!(ihid->quirks & I2C_HID_QUIRK_NO_RUNTIME_PM))
-               pm_runtime_put(&client->dev);
-
        return 0;
 
 err_mem_free:
@@ -1165,10 +1112,6 @@ err_mem_free:
 err_irq:
        free_irq(client->irq, ihid);
 
-err_pm:
-       pm_runtime_put_noidle(&client->dev);
-       pm_runtime_disable(&client->dev);
-
 err_regulator:
        regulator_bulk_disable(ARRAY_SIZE(ihid->pdata.supplies),
                               ihid->pdata.supplies);
@@ -1181,12 +1124,6 @@ static int i2c_hid_remove(struct i2c_client *client)
        struct i2c_hid *ihid = i2c_get_clientdata(client);
        struct hid_device *hid;
 
-       if (!(ihid->quirks & I2C_HID_QUIRK_NO_RUNTIME_PM))
-               pm_runtime_get_sync(&client->dev);
-       pm_runtime_disable(&client->dev);
-       pm_runtime_set_suspended(&client->dev);
-       pm_runtime_put_noidle(&client->dev);
-
        hid = ihid->hid;
        hid_destroy_device(hid);
 
@@ -1219,25 +1156,15 @@ static int i2c_hid_suspend(struct device *dev)
        int wake_status;
 
        if (hid->driver && hid->driver->suspend) {
-               /*
-                * Wake up the device so that IO issues in
-                * HID driver's suspend code can succeed.
-                */
-               ret = pm_runtime_resume(dev);
-               if (ret < 0)
-                       return ret;
-
                ret = hid->driver->suspend(hid, PMSG_SUSPEND);
                if (ret < 0)
                        return ret;
        }
 
-       if (!pm_runtime_suspended(dev)) {
-               /* Save some power */
-               i2c_hid_set_power(client, I2C_HID_PWR_SLEEP);
+       /* Save some power */
+       i2c_hid_set_power(client, I2C_HID_PWR_SLEEP);
 
-               disable_irq(client->irq);
-       }
+       disable_irq(client->irq);
 
        if (device_may_wakeup(&client->dev)) {
                wake_status = enable_irq_wake(client->irq);
@@ -1279,11 +1206,6 @@ static int i2c_hid_resume(struct device *dev)
                                wake_status);
        }
 
-       /* We'll resume to full power */
-       pm_runtime_disable(dev);
-       pm_runtime_set_active(dev);
-       pm_runtime_enable(dev);
-
        enable_irq(client->irq);
 
        /* Instead of resetting device, simply powers the device on. This
@@ -1304,30 +1226,8 @@ static int i2c_hid_resume(struct device *dev)
 }
 #endif
 
-#ifdef CONFIG_PM
-static int i2c_hid_runtime_suspend(struct device *dev)
-{
-       struct i2c_client *client = to_i2c_client(dev);
-
-       i2c_hid_set_power(client, I2C_HID_PWR_SLEEP);
-       disable_irq(client->irq);
-       return 0;
-}
-
-static int i2c_hid_runtime_resume(struct device *dev)
-{
-       struct i2c_client *client = to_i2c_client(dev);
-
-       enable_irq(client->irq);
-       i2c_hid_set_power(client, I2C_HID_PWR_ON);
-       return 0;
-}
-#endif
-
 static const struct dev_pm_ops i2c_hid_pm = {
        SET_SYSTEM_SLEEP_PM_OPS(i2c_hid_suspend, i2c_hid_resume)
-       SET_RUNTIME_PM_OPS(i2c_hid_runtime_suspend, i2c_hid_runtime_resume,
-                          NULL)
 };
 
 static const struct i2c_device_id i2c_hid_id_table[] = {
index 75078c83be1af579c9e7ad804271d620d3849dc2..d31ea82b84c173033dd1a6582122ec9197671fbc 100644 (file)
@@ -322,6 +322,25 @@ static const struct dmi_system_id i2c_hid_dmi_desc_override_table[] = {
                },
                .driver_data = (void *)&sipodev_desc
        },
+       {
+               /*
+                * There are at least 2 Primebook C11B versions, the older
+                * version has a product-name of "Primebook C11B", and a
+                * bios version / release / firmware revision of:
+                * V2.1.2 / 05/03/2018 / 18.2
+                * The new version has "PRIMEBOOK C11B" as product-name and a
+                * bios version / release / firmware revision of:
+                * CFALKSW05_BIOS_V1.1.2 / 11/19/2018 / 19.2
+                * Only the older version needs this quirk, note the newer
+                * version will not match as it has a different product-name.
+                */
+               .ident = "Trekstor Primebook C11B",
+               .matches = {
+                       DMI_EXACT_MATCH(DMI_SYS_VENDOR, "TREKSTOR"),
+                       DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Primebook C11B"),
+               },
+               .driver_data = (void *)&sipodev_desc
+       },
        {
                .ident = "Direkt-Tek DTLAPY116-2",
                .matches = {
index 1b0a0cc605e7740d07d8e0fd8c976105abdfae2e..513d7a4a1b8acd176a673375d6f594c3820e116a 100644 (file)
@@ -84,7 +84,7 @@ int ishtp_cl_alloc_tx_ring(struct ishtp_cl *cl)
        return  0;
 out:
        dev_err(&cl->device->dev, "error in allocating Tx pool\n");
-       ishtp_cl_free_rx_ring(cl);
+       ishtp_cl_free_tx_ring(cl);
        return  -ENOMEM;
 }
 
index 4a7f8d363220dc1cd507e0476edc92a7d7724ad9..203d27d198b818a364c2614097fb62f3748f0d21 100644 (file)
@@ -202,6 +202,21 @@ static inline void wacom_schedule_work(struct wacom_wac *wacom_wac,
        }
 }
 
+/*
+ * Convert a signed 32-bit integer to an unsigned n-bit integer. Undoes
+ * the normally-helpful work of 'hid_snto32' for fields that use signed
+ * ranges for questionable reasons.
+ */
+static inline __u32 wacom_s32tou(s32 value, __u8 n)
+{
+       switch (n) {
+       case 8:  return ((__u8)value);
+       case 16: return ((__u16)value);
+       case 32: return ((__u32)value);
+       }
+       return value & (1 << (n - 1)) ? value & (~(~0U << n)) : value;
+}
+
 extern const struct hid_device_id wacom_ids[];
 
 void wacom_wac_irq(struct wacom_wac *wacom_wac, size_t len);
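
wacom_s32tou() above undoes the sign extension that hid_snto32() applies to n-bit fields, so serial numbers and tool IDs keep their raw bit pattern. A standalone check of the same arithmetic; the values used in main() are made up for illustration:

#include <stdio.h>
#include <stdint.h>

static uint32_t s32tou(int32_t value, uint8_t n)
{
	switch (n) {
	case 8:  return (uint8_t)value;
	case 16: return (uint16_t)value;
	case 32: return (uint32_t)value;
	}
	return value & (1 << (n - 1)) ? value & ~(~0U << n) : (uint32_t)value;
}

int main(void)
{
	/* A 20-bit field holding 0xFFFFF sign-extends to -1; undo it. */
	int32_t extended = -1;

	printf("%#x\n", s32tou(extended, 20));	/* prints 0xfffff */
	return 0;
}
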
index 2b0a5b8ca6e692850aa71ab21c3663fc5b56041b..ccb74529bc78243689d0720bbe2d81cfd8a52004 100644 (file)
@@ -2303,7 +2303,7 @@ static void wacom_wac_pen_event(struct hid_device *hdev, struct hid_field *field
        case HID_DG_TOOLSERIALNUMBER:
                if (value) {
                        wacom_wac->serial[0] = (wacom_wac->serial[0] & ~0xFFFFFFFFULL);
-                       wacom_wac->serial[0] |= (__u32)value;
+                       wacom_wac->serial[0] |= wacom_s32tou(value, field->report_size);
                }
                return;
        case HID_DG_TWIST:
@@ -2319,15 +2319,17 @@ static void wacom_wac_pen_event(struct hid_device *hdev, struct hid_field *field
                return;
        case WACOM_HID_WD_SERIALHI:
                if (value) {
+                       __u32 raw_value = wacom_s32tou(value, field->report_size);
+
                        wacom_wac->serial[0] = (wacom_wac->serial[0] & 0xFFFFFFFF);
-                       wacom_wac->serial[0] |= ((__u64)value) << 32;
+                       wacom_wac->serial[0] |= ((__u64)raw_value) << 32;
                        /*
                         * Non-USI EMR devices may contain additional tool type
                         * information here. See WACOM_HID_WD_TOOLTYPE case for
                         * more details.
                         */
                        if (value >> 20 == 1) {
-                               wacom_wac->id[0] |= value & 0xFFFFF;
+                               wacom_wac->id[0] |= raw_value & 0xFFFFF;
                        }
                }
                return;
@@ -2339,7 +2341,7 @@ static void wacom_wac_pen_event(struct hid_device *hdev, struct hid_field *field
                 * bitwise OR so the complete value can be built
                 * up over time :(
                 */
-               wacom_wac->id[0] |= value;
+               wacom_wac->id[0] |= wacom_s32tou(value, field->report_size);
                return;
        case WACOM_HID_WD_OFFSETLEFT:
                if (features->offset_left && value != features->offset_left)
index 391f0b225c9ae4698c3cc3d9958b10873cf861a0..53a60c81e220d805fbcf28ce49e95019b6320f0b 100644 (file)
@@ -912,6 +912,7 @@ static void vmbus_shutdown(struct device *child_device)
                drv->shutdown(dev);
 }
 
+#ifdef CONFIG_PM_SLEEP
 /*
  * vmbus_suspend - Suspend a vmbus device
  */
@@ -949,6 +950,7 @@ static int vmbus_resume(struct device *child_device)
 
        return drv->resume(dev);
 }
+#endif /* CONFIG_PM_SLEEP */
 
 /*
  * vmbus_device_release - Final callback release of the vmbus child device
@@ -1070,6 +1072,7 @@ msg_handled:
        vmbus_signal_eom(msg, message_type);
 }
 
+#ifdef CONFIG_PM_SLEEP
 /*
  * Fake RESCIND_CHANNEL messages to clean up hv_sock channels by force for
  * hibernation, because hv_sock connections can not persist across hibernation.
@@ -1105,6 +1108,7 @@ static void vmbus_force_channel_rescinded(struct vmbus_channel *channel)
                      vmbus_connection.work_queue,
                      &ctx->work);
 }
+#endif /* CONFIG_PM_SLEEP */
 
 /*
  * Direct callback for channels using other deferred processing
@@ -2125,6 +2129,7 @@ acpi_walk_err:
        return ret_val;
 }
 
+#ifdef CONFIG_PM_SLEEP
 static int vmbus_bus_suspend(struct device *dev)
 {
        struct vmbus_channel *channel, *sc;
@@ -2247,6 +2252,7 @@ static int vmbus_bus_resume(struct device *dev)
 
        return 0;
 }
+#endif /* CONFIG_PM_SLEEP */
 
 static const struct acpi_device_id vmbus_acpi_device_ids[] = {
        {"VMBUS", 0},
index 0037e2bdacd6b25ea2edca74fa7d11b542e0b843..8a51dcf055eab73222c828a64ba7c4a5b743aefb 100644 (file)
@@ -170,7 +170,7 @@ static inline int ina3221_wait_for_data(struct ina3221_data *ina)
 
        /* Polling the CVRF bit to make sure read data is ready */
        return regmap_field_read_poll_timeout(ina->fields[F_CVRF],
-                                             cvrf, cvrf, wait, 100000);
+                                             cvrf, cvrf, wait, wait * 2);
 }
 
 static int ina3221_read_value(struct ina3221_data *ina, unsigned int reg,
index 95b447cfa24cb82a118fa62f3bc4c99caec33153..281c81edabc65740df60c154b4b1e59c4c995fc7 100644 (file)
 #define FANCTL1_FMR_REG                0x00    /* Bank 3; 1 reg per channel */
 #define FANCTL1_OUT_REG                0x10    /* Bank 3; 1 reg per channel */
 
+#define VOLT_MONITOR_MODE      0x0
+#define THERMAL_DIODE_MODE     0x1
+#define THERMISTOR_MODE                0x3
+
 #define ENABLE_TSI     BIT(1)
 
 static const unsigned short normal_i2c[] = {
@@ -99,6 +103,8 @@ struct nct7904_data {
        u8 enable_dts;
        u8 has_dts;
        u8 temp_mode; /* 0: TR mode, 1: TD mode */
+       u8 fan_alarm[2];
+       u8 vsen_alarm[3];
 };
 
 /* Access functions */
@@ -214,7 +220,15 @@ static int nct7904_read_fan(struct device *dev, u32 attr, int channel,
                                       SMI_STS5_REG + (channel >> 3));
                if (ret < 0)
                        return ret;
-               *val = (ret >> (channel & 0x07)) & 1;
+               if (!data->fan_alarm[channel >> 3])
+                       data->fan_alarm[channel >> 3] = ret & 0xff;
+               else
+                       /* Accumulate any newly raised alarm bits */
+                       data->fan_alarm[channel >> 3] |= (ret & 0xff);
+               *val = (data->fan_alarm[channel >> 3] >> (channel & 0x07)) & 1;
+               /* Clear the alarm bit we have just reported */
+               if (*val)
+                       data->fan_alarm[channel >> 3] ^= 1 << (channel & 0x07);
                return 0;
        default:
                return -EOPNOTSUPP;
@@ -298,7 +312,15 @@ static int nct7904_read_in(struct device *dev, u32 attr, int channel,
                                       SMI_STS1_REG + (index >> 3));
                if (ret < 0)
                        return ret;
-               *val = (ret >> (index & 0x07)) & 1;
+               if (!data->vsen_alarm[index >> 3])
+                       data->vsen_alarm[index >> 3] = ret & 0xff;
+               else
+                       /* Accumulate any newly raised alarm bits */
+                       data->vsen_alarm[index >> 3] |= (ret & 0xff);
+               *val = (data->vsen_alarm[index >> 3] >> (index & 0x07)) & 1;
+               /* Clear the alarm bit we have just reported */
+               if (*val)
+                       data->vsen_alarm[index >> 3] ^= 1 << (index & 0x07);
                return 0;
        default:
                return -EOPNOTSUPP;
@@ -915,12 +937,20 @@ static int nct7904_probe(struct i2c_client *client,
 
        data->temp_mode = 0;
        for (i = 0; i < 4; i++) {
-               val = (ret & (0x03 << i)) >> (i * 2);
+               val = (ret >> (i * 2)) & 0x03;
                bit = (1 << i);
-               if (val == 0)
+               if (val == VOLT_MONITOR_MODE) {
                        data->tcpu_mask &= ~bit;
-               else if (val == 0x1 || val == 0x2)
+               } else if (val == THERMAL_DIODE_MODE && i < 2) {
                        data->temp_mode |= bit;
+                       data->vsen_mask &= ~(0x06 << (i * 2));
+               } else if (val == THERMISTOR_MODE) {
+                       data->vsen_mask &= ~(0x02 << (i * 2));
+               } else {
+                       /* Reserved */
+                       data->tcpu_mask &= ~bit;
+                       data->vsen_mask &= ~(0x06 << (i * 2));
+               }
        }
 
        /* PECI */
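The alarm handling above latches the raw status byte in fan_alarm[]/vsen_alarm[] so that alarms consumed by a read for one channel are not lost for the other channels sharing the same SMI status register, and clears only the bit it has just reported. The idea, reduced to a few lines of plain C (names hypothetical, outside the driver):

	/* Latch bits from a clear-on-read status byte, consume one bit per query. */
	static unsigned char latched;

	static int report_alarm(unsigned char hw_status, unsigned int bit)
	{
		int val;

		latched |= hw_status;			/* keep alarms seen by earlier reads */
		val = (latched >> bit) & 1;
		if (val)
			latched &= ~(1U << bit);	/* clear only the reported alarm */
		return val;
	}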
index fa66951b05d06670ffdb79ff7b1532574177594a..7b098ff5f5dd34b5bc9e8a0553a5afcd84b722a3 100644 (file)
 #define ASPEED_I2CD_S_TX_CMD                           BIT(2)
 #define ASPEED_I2CD_M_TX_CMD                           BIT(1)
 #define ASPEED_I2CD_M_START_CMD                                BIT(0)
+#define ASPEED_I2CD_MASTER_CMDS_MASK                                          \
+               (ASPEED_I2CD_M_STOP_CMD |                                      \
+                ASPEED_I2CD_M_S_RX_CMD_LAST |                                 \
+                ASPEED_I2CD_M_RX_CMD |                                        \
+                ASPEED_I2CD_M_TX_CMD |                                        \
+                ASPEED_I2CD_M_START_CMD)
 
 /* 0x18 : I2CD Slave Device Address Register   */
 #define ASPEED_I2CD_DEV_ADDR_MASK                      GENMASK(6, 0)
@@ -336,18 +342,19 @@ static void aspeed_i2c_do_start(struct aspeed_i2c_bus *bus)
        struct i2c_msg *msg = &bus->msgs[bus->msgs_index];
        u8 slave_addr = i2c_8bit_addr_from_msg(msg);
 
-       bus->master_state = ASPEED_I2C_MASTER_START;
-
 #if IS_ENABLED(CONFIG_I2C_SLAVE)
        /*
         * If it's requested in the middle of a slave session, set the master
         * state to 'pending' then H/W will continue handling this master
         * command when the bus comes back to the idle state.
         */
-       if (bus->slave_state != ASPEED_I2C_SLAVE_INACTIVE)
+       if (bus->slave_state != ASPEED_I2C_SLAVE_INACTIVE) {
                bus->master_state = ASPEED_I2C_MASTER_PENDING;
+               return;
+       }
 #endif /* CONFIG_I2C_SLAVE */
 
+       bus->master_state = ASPEED_I2C_MASTER_START;
        bus->buf_index = 0;
 
        if (msg->flags & I2C_M_RD) {
@@ -422,20 +429,6 @@ static u32 aspeed_i2c_master_irq(struct aspeed_i2c_bus *bus, u32 irq_status)
                }
        }
 
-#if IS_ENABLED(CONFIG_I2C_SLAVE)
-       /*
-        * A pending master command will be started by H/W when the bus comes
-        * back to idle state after completing a slave operation so change the
-        * master state from 'pending' to 'start' at here if slave is inactive.
-        */
-       if (bus->master_state == ASPEED_I2C_MASTER_PENDING) {
-               if (bus->slave_state != ASPEED_I2C_SLAVE_INACTIVE)
-                       goto out_no_complete;
-
-               bus->master_state = ASPEED_I2C_MASTER_START;
-       }
-#endif /* CONFIG_I2C_SLAVE */
-
        /* Master is not currently active, irq was for someone else. */
        if (bus->master_state == ASPEED_I2C_MASTER_INACTIVE ||
            bus->master_state == ASPEED_I2C_MASTER_PENDING)
@@ -462,11 +455,15 @@ static u32 aspeed_i2c_master_irq(struct aspeed_i2c_bus *bus, u32 irq_status)
 #if IS_ENABLED(CONFIG_I2C_SLAVE)
                /*
                 * If a peer master starts a xfer immediately after it queues a
-                * master command, change its state to 'pending' then H/W will
-                * continue the queued master xfer just after completing the
-                * slave mode session.
+                * master command, clear the queued master command and change
+                * its state to 'pending'. To simplify handling of pending
+                * cases, it uses S/W solution instead of H/W command queue
+                * handling.
                 */
                if (unlikely(irq_status & ASPEED_I2CD_INTR_SLAVE_MATCH)) {
+                       writel(readl(bus->base + ASPEED_I2C_CMD_REG) &
+                               ~ASPEED_I2CD_MASTER_CMDS_MASK,
+                              bus->base + ASPEED_I2C_CMD_REG);
                        bus->master_state = ASPEED_I2C_MASTER_PENDING;
                        dev_dbg(bus->dev,
                                "master goes pending due to a slave start\n");
@@ -629,6 +626,14 @@ static irqreturn_t aspeed_i2c_bus_irq(int irq, void *dev_id)
                        irq_handled |= aspeed_i2c_master_irq(bus,
                                                             irq_remaining);
        }
+
+       /*
+        * Start a pending master command here if a slave operation is
+        * completed.
+        */
+       if (bus->master_state == ASPEED_I2C_MASTER_PENDING &&
+           bus->slave_state == ASPEED_I2C_SLAVE_INACTIVE)
+               aspeed_i2c_do_start(bus);
 #else
        irq_handled = aspeed_i2c_master_irq(bus, irq_remaining);
 #endif /* CONFIG_I2C_SLAVE */
@@ -691,6 +696,15 @@ static int aspeed_i2c_master_xfer(struct i2c_adapter *adap,
                     ASPEED_I2CD_BUS_BUSY_STS))
                        aspeed_i2c_recover_bus(bus);
 
+               /*
+                * If timed out and the state is still pending, drop the pending
+                * master command.
+                */
+               spin_lock_irqsave(&bus->lock, flags);
+               if (bus->master_state == ASPEED_I2C_MASTER_PENDING)
+                       bus->master_state = ASPEED_I2C_MASTER_INACTIVE;
+               spin_unlock_irqrestore(&bus->lock, flags);
+
                return -ETIMEDOUT;
        }
 
index 29eae1bf4f861a39078091644aa329326aa1c2ed..2152ec5f535c19316b0d7800afbf1bd3a44e5768 100644 (file)
@@ -875,7 +875,7 @@ static irqreturn_t mtk_i2c_irq(int irqno, void *dev_id)
 
 static u32 mtk_i2c_functionality(struct i2c_adapter *adap)
 {
-       if (adap->quirks->flags & I2C_AQ_NO_ZERO_LEN)
+       if (i2c_check_quirks(adap, I2C_AQ_NO_ZERO_LEN))
                return I2C_FUNC_I2C |
                        (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK);
        else
index d36cf08461f76d78edaff2250b21324b2996775c..b24e7b937f210bbb9fc5279f386c1f0e6044f528 100644 (file)
@@ -305,7 +305,7 @@ struct stm32f7_i2c_dev {
        struct regmap *regmap;
 };
 
-/**
+/*
  * All these values are coming from I2C Specification, Version 6.0, 4th of
  * April 2014.
  *
@@ -1192,6 +1192,8 @@ static void stm32f7_i2c_slave_start(struct stm32f7_i2c_dev *i2c_dev)
                        STM32F7_I2C_CR1_TXIE;
                stm32f7_i2c_set_bits(base + STM32F7_I2C_CR1, mask);
 
+               /* Write 1st data byte */
+               writel_relaxed(value, base + STM32F7_I2C_TXDR);
        } else {
                /* Notify i2c slave that new write transfer is starting */
                i2c_slave_event(slave, I2C_SLAVE_WRITE_REQUESTED, &value);
@@ -1501,7 +1503,7 @@ static irqreturn_t stm32f7_i2c_isr_error(int irq, void *data)
        void __iomem *base = i2c_dev->base;
        struct device *dev = i2c_dev->dev;
        struct stm32_i2c_dma *dma = i2c_dev->dma;
-       u32 mask, status;
+       u32 status;
 
        status = readl_relaxed(i2c_dev->base + STM32F7_I2C_ISR);
 
@@ -1526,12 +1528,15 @@ static irqreturn_t stm32f7_i2c_isr_error(int irq, void *data)
                f7_msg->result = -EINVAL;
        }
 
-       /* Disable interrupts */
-       if (stm32f7_i2c_is_slave_registered(i2c_dev))
-               mask = STM32F7_I2C_XFER_IRQ_MASK;
-       else
-               mask = STM32F7_I2C_ALL_IRQ_MASK;
-       stm32f7_i2c_disable_irq(i2c_dev, mask);
+       if (!i2c_dev->slave_running) {
+               u32 mask;
+               /* Disable interrupts */
+               if (stm32f7_i2c_is_slave_registered(i2c_dev))
+                       mask = STM32F7_I2C_XFER_IRQ_MASK;
+               else
+                       mask = STM32F7_I2C_ALL_IRQ_MASK;
+               stm32f7_i2c_disable_irq(i2c_dev, mask);
+       }
 
        /* Disable dma */
        if (i2c_dev->use_dma) {
index 055227cb3d43c4bb6f51f2611aa57d5fc9b9936a..67b8817995c047b791c638da6e1131f9f967f222 100644 (file)
@@ -474,12 +474,17 @@ static int adxl372_configure_fifo(struct adxl372_state *st)
        if (ret < 0)
                return ret;
 
-       fifo_samples = st->watermark & 0xFF;
+       /*
+        * watermark stores the number of sets; we need to write the FIFO
+        * registers with the number of samples
+        */
+       fifo_samples = (st->watermark * st->fifo_set_size);
        fifo_ctl = ADXL372_FIFO_CTL_FORMAT_MODE(st->fifo_format) |
                   ADXL372_FIFO_CTL_MODE_MODE(st->fifo_mode) |
-                  ADXL372_FIFO_CTL_SAMPLES_MODE(st->watermark);
+                  ADXL372_FIFO_CTL_SAMPLES_MODE(fifo_samples);
 
-       ret = regmap_write(st->regmap, ADXL372_FIFO_SAMPLES, fifo_samples);
+       ret = regmap_write(st->regmap,
+                          ADXL372_FIFO_SAMPLES, fifo_samples & 0xFF);
        if (ret < 0)
                return ret;
 
@@ -548,8 +553,7 @@ static irqreturn_t adxl372_trigger_handler(int irq, void  *p)
                        goto err;
 
                /* Each sample is 2 bytes */
-               for (i = 0; i < fifo_entries * sizeof(u16);
-                    i += st->fifo_set_size * sizeof(u16))
+               for (i = 0; i < fifo_entries; i += st->fifo_set_size)
                        iio_push_to_buffers(indio_dev, &st->fifo_buf[i]);
        }
 err:
@@ -571,6 +575,14 @@ static int adxl372_setup(struct adxl372_state *st)
                return -ENODEV;
        }
 
+       /*
+        * Perform a software reset to make sure the device is in a consistent
+        * state after start up.
+        */
+       ret = regmap_write(st->regmap, ADXL372_RESET, ADXL372_RESET_CODE);
+       if (ret < 0)
+               return ret;
+
        ret = adxl372_set_op_mode(st, ADXL372_STANDBY);
        if (ret < 0)
                return ret;
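Two related fixes above: st->watermark counts sample sets while the FIFO registers count individual 16-bit samples, and the trigger handler now walks the FIFO buffer in samples rather than bytes. A sketch of the watermark conversion with made-up numbers (illustration only, not driver code):

	unsigned int watermark_sets = 42;	/* user-requested watermark, in sets */
	unsigned int fifo_set_size = 3;		/* e.g. X, Y and Z per set */
	unsigned int fifo_samples = watermark_sets * fifo_set_size;	/* 126 samples */

	/* Only the low byte fits in ADXL372_FIFO_SAMPLES; the full value is also
	 * passed to ADXL372_FIFO_CTL_SAMPLES_MODE(), which presumably carries the
	 * remaining high bit of the sample count. */
	unsigned int fifo_samples_reg = fifo_samples & 0xFF;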
index cf6c0e3a83d381db42c202da4897cbf4a620e717..121b4e89f038c7782d86d8aaee0fd6ba0043721a 100644 (file)
 #define BMC150_ACCEL_SLEEP_1_SEC               0x0F
 
 #define BMC150_ACCEL_REG_TEMP                  0x08
-#define BMC150_ACCEL_TEMP_CENTER_VAL           24
+#define BMC150_ACCEL_TEMP_CENTER_VAL           23
 
 #define BMC150_ACCEL_AXIS_TO_REG(axis) (BMC150_ACCEL_REG_XOUT_L + (axis * 2))
 #define BMC150_AUTO_SUSPEND_DELAY_MS           2000
index 5a3ca5904dedc421d501548cf656775a22ca4853..f658012baad88d11fd148306e152cc73b8ea5d6d 100644 (file)
@@ -810,10 +810,10 @@ static int ad799x_probe(struct i2c_client *client,
 
        ret = ad799x_write_config(st, st->chip_config->default_config);
        if (ret < 0)
-               goto error_disable_reg;
+               goto error_disable_vref;
        ret = ad799x_read_config(st);
        if (ret < 0)
-               goto error_disable_reg;
+               goto error_disable_vref;
        st->config = ret;
 
        ret = iio_triggered_buffer_setup(indio_dev, NULL,
index adc9cf7a075d200702cc34665e271a0e174d241a..8ea2aed6d6f5a2179f5d6b0cc51abf4f42fa93ad 100644 (file)
@@ -7,6 +7,7 @@
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  */
 
+#include <linux/dmi.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/device.h>
 #define AXP288_ADC_EN_MASK                             0xF0
 #define AXP288_ADC_TS_ENABLE                           0x01
 
+#define AXP288_ADC_TS_BIAS_MASK                                GENMASK(5, 4)
+#define AXP288_ADC_TS_BIAS_20UA                                (0 << 4)
+#define AXP288_ADC_TS_BIAS_40UA                                (1 << 4)
+#define AXP288_ADC_TS_BIAS_60UA                                (2 << 4)
+#define AXP288_ADC_TS_BIAS_80UA                                (3 << 4)
 #define AXP288_ADC_TS_CURRENT_ON_OFF_MASK              GENMASK(1, 0)
 #define AXP288_ADC_TS_CURRENT_OFF                      (0 << 0)
 #define AXP288_ADC_TS_CURRENT_ON_WHEN_CHARGING         (1 << 0)
@@ -177,10 +183,36 @@ static int axp288_adc_read_raw(struct iio_dev *indio_dev,
        return ret;
 }
 
+/*
+ * We rely on the machine's firmware to correctly setup the TS pin bias current
+ * at boot. This lists systems with broken fw where we need to set it ourselves.
+ */
+static const struct dmi_system_id axp288_adc_ts_bias_override[] = {
+       {
+               /* Lenovo Ideapad 100S (11 inch) */
+               .matches = {
+                 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+                 DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad 100S-11IBY"),
+               },
+               .driver_data = (void *)(uintptr_t)AXP288_ADC_TS_BIAS_80UA,
+       },
+       {}
+};
+
 static int axp288_adc_initialize(struct axp288_adc_info *info)
 {
+       const struct dmi_system_id *bias_override;
        int ret, adc_enable_val;
 
+       bias_override = dmi_first_match(axp288_adc_ts_bias_override);
+       if (bias_override) {
+               ret = regmap_update_bits(info->regmap, AXP288_ADC_TS_PIN_CTRL,
+                                        AXP288_ADC_TS_BIAS_MASK,
+                                        (uintptr_t)bias_override->driver_data);
+               if (ret)
+                       return ret;
+       }
+
        /*
         * Determine if the TS pin is enabled and set the TS current-source
         * accordingly.
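The DMI quirk table above lets the driver override the TS pin bias current on machines whose firmware programs it incorrectly; the override value travels in .driver_data as a small integer cast through uintptr_t, since driver_data is a void * and the integer has to round-trip through a pointer-sized type. Consuming such an entry looks roughly like the code in axp288_adc_initialize() above:

	const struct dmi_system_id *quirk;

	quirk = dmi_first_match(axp288_adc_ts_bias_override);
	if (quirk) {
		u32 bias = (uintptr_t)quirk->driver_data;	/* e.g. AXP288_ADC_TS_BIAS_80UA */
		/* applied with regmap_update_bits(regmap, AXP288_ADC_TS_PIN_CTRL,
		 *                                  AXP288_ADC_TS_BIAS_MASK, bias); */
	}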
index 88c7fe15003b7179d8c4e565fcd7bb3415289baa..62e6c8badd22a8d645c9edb1e0d10784157a5b61 100644 (file)
@@ -100,14 +100,14 @@ struct hx711_data {
 
 static int hx711_cycle(struct hx711_data *hx711_data)
 {
-       int val;
+       unsigned long flags;
 
        /*
         * if preempted for more than 60us while PD_SCK is high:
         * hx711 is going in reset
         * ==> measuring is false
         */
-       preempt_disable();
+       local_irq_save(flags);
        gpiod_set_value(hx711_data->gpiod_pd_sck, 1);
 
        /*
@@ -117,7 +117,6 @@ static int hx711_cycle(struct hx711_data *hx711_data)
         */
        ndelay(hx711_data->data_ready_delay_ns);
 
-       val = gpiod_get_value(hx711_data->gpiod_dout);
        /*
         * here we are not waiting for 0.2 us as suggested by the datasheet,
         * because the oscilloscope showed in a test scenario
@@ -125,7 +124,7 @@ static int hx711_cycle(struct hx711_data *hx711_data)
         * and 0.56 us for PD_SCK low on TI Sitara with 800 MHz
         */
        gpiod_set_value(hx711_data->gpiod_pd_sck, 0);
-       preempt_enable();
+       local_irq_restore(flags);
 
        /*
         * make it a square wave for addressing cases with capacitance on
@@ -133,7 +132,8 @@ static int hx711_cycle(struct hx711_data *hx711_data)
         */
        ndelay(hx711_data->data_ready_delay_ns);
 
-       return val;
+       /* sample as late as possible */
+       return gpiod_get_value(hx711_data->gpiod_dout);
 }
 
 static int hx711_read(struct hx711_data *hx711_data)
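preempt_disable() only stops the scheduler from preempting or migrating the task; a hardware interrupt on the same CPU can still stretch the PD_SCK-high window past the roughly 60 us after which the HX711 enters power-down, which is why the patch masks local interrupts instead and samples DOUT only after the clock has gone low again. The shape of the bounded bit-bang window (a sketch with hypothetical names, assuming the gpiod and ndelay helpers already used by the driver):

	unsigned long flags;
	int dout;

	local_irq_save(flags);			/* nothing may stretch the high phase */
	gpiod_set_value(sck_gpio, 1);
	ndelay(t_high_ns);			/* must stay well below 60000 ns */
	gpiod_set_value(sck_gpio, 0);
	local_irq_restore(flags);

	ndelay(t_low_ns);			/* keep the clock roughly square */
	dout = gpiod_get_value(dout_gpio);	/* sample as late as possible */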
index 7b28d045d2719f3e3a710eaa2ffce0e3183d561e..7b27306330a351f2edbee70f202d7110a79100b3 100644 (file)
@@ -1219,6 +1219,11 @@ static int meson_sar_adc_probe(struct platform_device *pdev)
        if (IS_ERR(base))
                return PTR_ERR(base);
 
+       priv->regmap = devm_regmap_init_mmio(&pdev->dev, base,
+                                            priv->param->regmap_config);
+       if (IS_ERR(priv->regmap))
+               return PTR_ERR(priv->regmap);
+
        irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
        if (!irq)
                return -EINVAL;
@@ -1228,11 +1233,6 @@ static int meson_sar_adc_probe(struct platform_device *pdev)
        if (ret)
                return ret;
 
-       priv->regmap = devm_regmap_init_mmio(&pdev->dev, base,
-                                            priv->param->regmap_config);
-       if (IS_ERR(priv->regmap))
-               return PTR_ERR(priv->regmap);
-
        priv->clkin = devm_clk_get(&pdev->dev, "clkin");
        if (IS_ERR(priv->clkin)) {
                dev_err(&pdev->dev, "failed to get clkin\n");
index 9b85fefc0a963f382a0365f6aefec33770a187e1..93a096a91f8c42f7f49cfab43531cdb2fd134cb9 100644 (file)
 
 #include "stm32-adc-core.h"
 
-/* STM32F4 - common registers for all ADC instances: 1, 2 & 3 */
-#define STM32F4_ADC_CSR                        (STM32_ADCX_COMN_OFFSET + 0x00)
-#define STM32F4_ADC_CCR                        (STM32_ADCX_COMN_OFFSET + 0x04)
-
-/* STM32F4_ADC_CSR - bit fields */
-#define STM32F4_EOC3                   BIT(17)
-#define STM32F4_EOC2                   BIT(9)
-#define STM32F4_EOC1                   BIT(1)
-
-/* STM32F4_ADC_CCR - bit fields */
-#define STM32F4_ADC_ADCPRE_SHIFT       16
-#define STM32F4_ADC_ADCPRE_MASK                GENMASK(17, 16)
-
-/* STM32H7 - common registers for all ADC instances */
-#define STM32H7_ADC_CSR                        (STM32_ADCX_COMN_OFFSET + 0x00)
-#define STM32H7_ADC_CCR                        (STM32_ADCX_COMN_OFFSET + 0x08)
-
-/* STM32H7_ADC_CSR - bit fields */
-#define STM32H7_EOC_SLV                        BIT(18)
-#define STM32H7_EOC_MST                        BIT(2)
-
-/* STM32H7_ADC_CCR - bit fields */
-#define STM32H7_PRESC_SHIFT            18
-#define STM32H7_PRESC_MASK             GENMASK(21, 18)
-#define STM32H7_CKMODE_SHIFT           16
-#define STM32H7_CKMODE_MASK            GENMASK(17, 16)
-
 #define STM32_ADC_CORE_SLEEP_DELAY_MS  2000
 
 /* SYSCFG registers */
@@ -71,6 +44,8 @@
  * @eoc1:      adc1 end of conversion flag in @csr
  * @eoc2:      adc2 end of conversion flag in @csr
  * @eoc3:      adc3 end of conversion flag in @csr
+ * @ier:       interrupt enable register offset for each adc
+ * @eocie_msk: end of conversion interrupt enable mask in @ier
  */
 struct stm32_adc_common_regs {
        u32 csr;
@@ -78,6 +53,8 @@ struct stm32_adc_common_regs {
        u32 eoc1_msk;
        u32 eoc2_msk;
        u32 eoc3_msk;
+       u32 ier;
+       u32 eocie_msk;
 };
 
 struct stm32_adc_priv;
@@ -303,6 +280,8 @@ static const struct stm32_adc_common_regs stm32f4_adc_common_regs = {
        .eoc1_msk = STM32F4_EOC1,
        .eoc2_msk = STM32F4_EOC2,
        .eoc3_msk = STM32F4_EOC3,
+       .ier = STM32F4_ADC_CR1,
+       .eocie_msk = STM32F4_EOCIE,
 };
 
 /* STM32H7 common registers definitions */
@@ -311,8 +290,24 @@ static const struct stm32_adc_common_regs stm32h7_adc_common_regs = {
        .ccr = STM32H7_ADC_CCR,
        .eoc1_msk = STM32H7_EOC_MST,
        .eoc2_msk = STM32H7_EOC_SLV,
+       .ier = STM32H7_ADC_IER,
+       .eocie_msk = STM32H7_EOCIE,
+};
+
+static const unsigned int stm32_adc_offset[STM32_ADC_MAX_ADCS] = {
+       0, STM32_ADC_OFFSET, STM32_ADC_OFFSET * 2,
 };
 
+static unsigned int stm32_adc_eoc_enabled(struct stm32_adc_priv *priv,
+                                         unsigned int adc)
+{
+       u32 ier, offset = stm32_adc_offset[adc];
+
+       ier = readl_relaxed(priv->common.base + offset + priv->cfg->regs->ier);
+
+       return ier & priv->cfg->regs->eocie_msk;
+}
+
 /* ADC common interrupt for all instances */
 static void stm32_adc_irq_handler(struct irq_desc *desc)
 {
@@ -323,13 +318,28 @@ static void stm32_adc_irq_handler(struct irq_desc *desc)
        chained_irq_enter(chip, desc);
        status = readl_relaxed(priv->common.base + priv->cfg->regs->csr);
 
-       if (status & priv->cfg->regs->eoc1_msk)
+       /*
+        * End of conversion may be handled by using IRQ or DMA. There may be a
+        * race here when two conversions complete at the same time on several
+        * ADCs. EOC may be read 'set' for several ADCs, with:
+        * - an ADC configured to use DMA (EOC triggers the DMA request, and
+        *   is then automatically cleared by DR read in hardware)
+        * - an ADC configured to use IRQs (EOCIE bit is set. The handler must
+        *   be called in this case)
+        * So both EOC status bit in CSR and EOCIE control bit must be checked
+        * before invoking the interrupt handler (e.g. call ISR only for
+        * IRQ-enabled ADCs).
+        */
+       if (status & priv->cfg->regs->eoc1_msk &&
+           stm32_adc_eoc_enabled(priv, 0))
                generic_handle_irq(irq_find_mapping(priv->domain, 0));
 
-       if (status & priv->cfg->regs->eoc2_msk)
+       if (status & priv->cfg->regs->eoc2_msk &&
+           stm32_adc_eoc_enabled(priv, 1))
                generic_handle_irq(irq_find_mapping(priv->domain, 1));
 
-       if (status & priv->cfg->regs->eoc3_msk)
+       if (status & priv->cfg->regs->eoc3_msk &&
+           stm32_adc_eoc_enabled(priv, 2))
                generic_handle_irq(irq_find_mapping(priv->domain, 2));
 
        chained_irq_exit(chip, desc);
index 8af507b3f32d914d70307bfff7e8819b4284581d..2579d514c2a3448cbd877ec8f3567e8103fc1399 100644 (file)
  * --------------------------------------------------------
  */
 #define STM32_ADC_MAX_ADCS             3
+#define STM32_ADC_OFFSET               0x100
 #define STM32_ADCX_COMN_OFFSET         0x300
 
+/* STM32F4 - Registers for each ADC instance */
+#define STM32F4_ADC_SR                 0x00
+#define STM32F4_ADC_CR1                        0x04
+#define STM32F4_ADC_CR2                        0x08
+#define STM32F4_ADC_SMPR1              0x0C
+#define STM32F4_ADC_SMPR2              0x10
+#define STM32F4_ADC_HTR                        0x24
+#define STM32F4_ADC_LTR                        0x28
+#define STM32F4_ADC_SQR1               0x2C
+#define STM32F4_ADC_SQR2               0x30
+#define STM32F4_ADC_SQR3               0x34
+#define STM32F4_ADC_JSQR               0x38
+#define STM32F4_ADC_JDR1               0x3C
+#define STM32F4_ADC_JDR2               0x40
+#define STM32F4_ADC_JDR3               0x44
+#define STM32F4_ADC_JDR4               0x48
+#define STM32F4_ADC_DR                 0x4C
+
+/* STM32F4 - common registers for all ADC instances: 1, 2 & 3 */
+#define STM32F4_ADC_CSR                        (STM32_ADCX_COMN_OFFSET + 0x00)
+#define STM32F4_ADC_CCR                        (STM32_ADCX_COMN_OFFSET + 0x04)
+
+/* STM32F4_ADC_SR - bit fields */
+#define STM32F4_STRT                   BIT(4)
+#define STM32F4_EOC                    BIT(1)
+
+/* STM32F4_ADC_CR1 - bit fields */
+#define STM32F4_RES_SHIFT              24
+#define STM32F4_RES_MASK               GENMASK(25, 24)
+#define STM32F4_SCAN                   BIT(8)
+#define STM32F4_EOCIE                  BIT(5)
+
+/* STM32F4_ADC_CR2 - bit fields */
+#define STM32F4_SWSTART                        BIT(30)
+#define STM32F4_EXTEN_SHIFT            28
+#define STM32F4_EXTEN_MASK             GENMASK(29, 28)
+#define STM32F4_EXTSEL_SHIFT           24
+#define STM32F4_EXTSEL_MASK            GENMASK(27, 24)
+#define STM32F4_EOCS                   BIT(10)
+#define STM32F4_DDS                    BIT(9)
+#define STM32F4_DMA                    BIT(8)
+#define STM32F4_ADON                   BIT(0)
+
+/* STM32F4_ADC_CSR - bit fields */
+#define STM32F4_EOC3                   BIT(17)
+#define STM32F4_EOC2                   BIT(9)
+#define STM32F4_EOC1                   BIT(1)
+
+/* STM32F4_ADC_CCR - bit fields */
+#define STM32F4_ADC_ADCPRE_SHIFT       16
+#define STM32F4_ADC_ADCPRE_MASK                GENMASK(17, 16)
+
+/* STM32H7 - Registers for each ADC instance */
+#define STM32H7_ADC_ISR                        0x00
+#define STM32H7_ADC_IER                        0x04
+#define STM32H7_ADC_CR                 0x08
+#define STM32H7_ADC_CFGR               0x0C
+#define STM32H7_ADC_SMPR1              0x14
+#define STM32H7_ADC_SMPR2              0x18
+#define STM32H7_ADC_PCSEL              0x1C
+#define STM32H7_ADC_SQR1               0x30
+#define STM32H7_ADC_SQR2               0x34
+#define STM32H7_ADC_SQR3               0x38
+#define STM32H7_ADC_SQR4               0x3C
+#define STM32H7_ADC_DR                 0x40
+#define STM32H7_ADC_DIFSEL             0xC0
+#define STM32H7_ADC_CALFACT            0xC4
+#define STM32H7_ADC_CALFACT2           0xC8
+
+/* STM32H7 - common registers for all ADC instances */
+#define STM32H7_ADC_CSR                        (STM32_ADCX_COMN_OFFSET + 0x00)
+#define STM32H7_ADC_CCR                        (STM32_ADCX_COMN_OFFSET + 0x08)
+
+/* STM32H7_ADC_ISR - bit fields */
+#define STM32MP1_VREGREADY             BIT(12)
+#define STM32H7_EOC                    BIT(2)
+#define STM32H7_ADRDY                  BIT(0)
+
+/* STM32H7_ADC_IER - bit fields */
+#define STM32H7_EOCIE                  STM32H7_EOC
+
+/* STM32H7_ADC_CR - bit fields */
+#define STM32H7_ADCAL                  BIT(31)
+#define STM32H7_ADCALDIF               BIT(30)
+#define STM32H7_DEEPPWD                        BIT(29)
+#define STM32H7_ADVREGEN               BIT(28)
+#define STM32H7_LINCALRDYW6            BIT(27)
+#define STM32H7_LINCALRDYW5            BIT(26)
+#define STM32H7_LINCALRDYW4            BIT(25)
+#define STM32H7_LINCALRDYW3            BIT(24)
+#define STM32H7_LINCALRDYW2            BIT(23)
+#define STM32H7_LINCALRDYW1            BIT(22)
+#define STM32H7_ADCALLIN               BIT(16)
+#define STM32H7_BOOST                  BIT(8)
+#define STM32H7_ADSTP                  BIT(4)
+#define STM32H7_ADSTART                        BIT(2)
+#define STM32H7_ADDIS                  BIT(1)
+#define STM32H7_ADEN                   BIT(0)
+
+/* STM32H7_ADC_CFGR bit fields */
+#define STM32H7_EXTEN_SHIFT            10
+#define STM32H7_EXTEN_MASK             GENMASK(11, 10)
+#define STM32H7_EXTSEL_SHIFT           5
+#define STM32H7_EXTSEL_MASK            GENMASK(9, 5)
+#define STM32H7_RES_SHIFT              2
+#define STM32H7_RES_MASK               GENMASK(4, 2)
+#define STM32H7_DMNGT_SHIFT            0
+#define STM32H7_DMNGT_MASK             GENMASK(1, 0)
+
+enum stm32h7_adc_dmngt {
+       STM32H7_DMNGT_DR_ONLY,          /* Regular data in DR only */
+       STM32H7_DMNGT_DMA_ONESHOT,      /* DMA one shot mode */
+       STM32H7_DMNGT_DFSDM,            /* DFSDM mode */
+       STM32H7_DMNGT_DMA_CIRC,         /* DMA circular mode */
+};
+
+/* STM32H7_ADC_CALFACT - bit fields */
+#define STM32H7_CALFACT_D_SHIFT                16
+#define STM32H7_CALFACT_D_MASK         GENMASK(26, 16)
+#define STM32H7_CALFACT_S_SHIFT                0
+#define STM32H7_CALFACT_S_MASK         GENMASK(10, 0)
+
+/* STM32H7_ADC_CALFACT2 - bit fields */
+#define STM32H7_LINCALFACT_SHIFT       0
+#define STM32H7_LINCALFACT_MASK                GENMASK(29, 0)
+
+/* STM32H7_ADC_CSR - bit fields */
+#define STM32H7_EOC_SLV                        BIT(18)
+#define STM32H7_EOC_MST                        BIT(2)
+
+/* STM32H7_ADC_CCR - bit fields */
+#define STM32H7_PRESC_SHIFT            18
+#define STM32H7_PRESC_MASK             GENMASK(21, 18)
+#define STM32H7_CKMODE_SHIFT           16
+#define STM32H7_CKMODE_MASK            GENMASK(17, 16)
+
 /**
  * struct stm32_adc_common - stm32 ADC driver common data (for all instances)
  * @base:              control registers base cpu addr
index 6a7dd08b1e0b1fde1fbf361135228b969b7597fe..663f8a5012d64123b06a646887c4c75a5f9860e1 100644 (file)
 
 #include "stm32-adc-core.h"
 
-/* STM32F4 - Registers for each ADC instance */
-#define STM32F4_ADC_SR                 0x00
-#define STM32F4_ADC_CR1                        0x04
-#define STM32F4_ADC_CR2                        0x08
-#define STM32F4_ADC_SMPR1              0x0C
-#define STM32F4_ADC_SMPR2              0x10
-#define STM32F4_ADC_HTR                        0x24
-#define STM32F4_ADC_LTR                        0x28
-#define STM32F4_ADC_SQR1               0x2C
-#define STM32F4_ADC_SQR2               0x30
-#define STM32F4_ADC_SQR3               0x34
-#define STM32F4_ADC_JSQR               0x38
-#define STM32F4_ADC_JDR1               0x3C
-#define STM32F4_ADC_JDR2               0x40
-#define STM32F4_ADC_JDR3               0x44
-#define STM32F4_ADC_JDR4               0x48
-#define STM32F4_ADC_DR                 0x4C
-
-/* STM32F4_ADC_SR - bit fields */
-#define STM32F4_STRT                   BIT(4)
-#define STM32F4_EOC                    BIT(1)
-
-/* STM32F4_ADC_CR1 - bit fields */
-#define STM32F4_RES_SHIFT              24
-#define STM32F4_RES_MASK               GENMASK(25, 24)
-#define STM32F4_SCAN                   BIT(8)
-#define STM32F4_EOCIE                  BIT(5)
-
-/* STM32F4_ADC_CR2 - bit fields */
-#define STM32F4_SWSTART                        BIT(30)
-#define STM32F4_EXTEN_SHIFT            28
-#define STM32F4_EXTEN_MASK             GENMASK(29, 28)
-#define STM32F4_EXTSEL_SHIFT           24
-#define STM32F4_EXTSEL_MASK            GENMASK(27, 24)
-#define STM32F4_EOCS                   BIT(10)
-#define STM32F4_DDS                    BIT(9)
-#define STM32F4_DMA                    BIT(8)
-#define STM32F4_ADON                   BIT(0)
-
-/* STM32H7 - Registers for each ADC instance */
-#define STM32H7_ADC_ISR                        0x00
-#define STM32H7_ADC_IER                        0x04
-#define STM32H7_ADC_CR                 0x08
-#define STM32H7_ADC_CFGR               0x0C
-#define STM32H7_ADC_SMPR1              0x14
-#define STM32H7_ADC_SMPR2              0x18
-#define STM32H7_ADC_PCSEL              0x1C
-#define STM32H7_ADC_SQR1               0x30
-#define STM32H7_ADC_SQR2               0x34
-#define STM32H7_ADC_SQR3               0x38
-#define STM32H7_ADC_SQR4               0x3C
-#define STM32H7_ADC_DR                 0x40
-#define STM32H7_ADC_DIFSEL             0xC0
-#define STM32H7_ADC_CALFACT            0xC4
-#define STM32H7_ADC_CALFACT2           0xC8
-
-/* STM32H7_ADC_ISR - bit fields */
-#define STM32MP1_VREGREADY             BIT(12)
-#define STM32H7_EOC                    BIT(2)
-#define STM32H7_ADRDY                  BIT(0)
-
-/* STM32H7_ADC_IER - bit fields */
-#define STM32H7_EOCIE                  STM32H7_EOC
-
-/* STM32H7_ADC_CR - bit fields */
-#define STM32H7_ADCAL                  BIT(31)
-#define STM32H7_ADCALDIF               BIT(30)
-#define STM32H7_DEEPPWD                        BIT(29)
-#define STM32H7_ADVREGEN               BIT(28)
-#define STM32H7_LINCALRDYW6            BIT(27)
-#define STM32H7_LINCALRDYW5            BIT(26)
-#define STM32H7_LINCALRDYW4            BIT(25)
-#define STM32H7_LINCALRDYW3            BIT(24)
-#define STM32H7_LINCALRDYW2            BIT(23)
-#define STM32H7_LINCALRDYW1            BIT(22)
-#define STM32H7_ADCALLIN               BIT(16)
-#define STM32H7_BOOST                  BIT(8)
-#define STM32H7_ADSTP                  BIT(4)
-#define STM32H7_ADSTART                        BIT(2)
-#define STM32H7_ADDIS                  BIT(1)
-#define STM32H7_ADEN                   BIT(0)
-
-/* STM32H7_ADC_CFGR bit fields */
-#define STM32H7_EXTEN_SHIFT            10
-#define STM32H7_EXTEN_MASK             GENMASK(11, 10)
-#define STM32H7_EXTSEL_SHIFT           5
-#define STM32H7_EXTSEL_MASK            GENMASK(9, 5)
-#define STM32H7_RES_SHIFT              2
-#define STM32H7_RES_MASK               GENMASK(4, 2)
-#define STM32H7_DMNGT_SHIFT            0
-#define STM32H7_DMNGT_MASK             GENMASK(1, 0)
-
-enum stm32h7_adc_dmngt {
-       STM32H7_DMNGT_DR_ONLY,          /* Regular data in DR only */
-       STM32H7_DMNGT_DMA_ONESHOT,      /* DMA one shot mode */
-       STM32H7_DMNGT_DFSDM,            /* DFSDM mode */
-       STM32H7_DMNGT_DMA_CIRC,         /* DMA circular mode */
-};
-
-/* STM32H7_ADC_CALFACT - bit fields */
-#define STM32H7_CALFACT_D_SHIFT                16
-#define STM32H7_CALFACT_D_MASK         GENMASK(26, 16)
-#define STM32H7_CALFACT_S_SHIFT                0
-#define STM32H7_CALFACT_S_MASK         GENMASK(10, 0)
-
-/* STM32H7_ADC_CALFACT2 - bit fields */
-#define STM32H7_LINCALFACT_SHIFT       0
-#define STM32H7_LINCALFACT_MASK                GENMASK(29, 0)
-
 /* Number of linear calibration shadow registers / LINCALRDYW control bits */
 #define STM32H7_LINCALFACT_NUM         6
 
index 9ac8356d9a954b0189dbdf38627c8b48d086d9a6..4998a89d083d54d4b6dafd2ccb9b3d825d2447e9 100644 (file)
@@ -35,8 +35,11 @@ static int adis_update_scan_mode_burst(struct iio_dev *indio_dev,
                return -ENOMEM;
 
        adis->buffer = kzalloc(burst_length + sizeof(u16), GFP_KERNEL);
-       if (!adis->buffer)
+       if (!adis->buffer) {
+               kfree(adis->xfer);
+               adis->xfer = NULL;
                return -ENOMEM;
+       }
 
        tx = adis->buffer + burst_length;
        tx[0] = ADIS_READ_REG(adis->burst->reg_cmd);
@@ -78,8 +81,11 @@ int adis_update_scan_mode(struct iio_dev *indio_dev,
                return -ENOMEM;
 
        adis->buffer = kcalloc(indio_dev->scan_bytes, 2, GFP_KERNEL);
-       if (!adis->buffer)
+       if (!adis->buffer) {
+               kfree(adis->xfer);
+               adis->xfer = NULL;
                return -ENOMEM;
+       }
 
        rx = adis->buffer;
        tx = rx + scan_count;
index 80e42c7dbcbe63039f4d23ed363799c75a1d9698..0fe6999b825714c5725d9a3cea3769a8a11e5b05 100644 (file)
@@ -99,7 +99,9 @@ struct st_lsm6dsx_fs {
 #define ST_LSM6DSX_FS_LIST_SIZE                4
 struct st_lsm6dsx_fs_table_entry {
        struct st_lsm6dsx_reg reg;
+
        struct st_lsm6dsx_fs fs_avl[ST_LSM6DSX_FS_LIST_SIZE];
+       int fs_len;
 };
 
 /**
index 2d34955601368870cb8ee931600a67146c5bb3e7..fd5ebe1e1594f3a396e858026f2baa3b55808aa3 100644 (file)
@@ -145,6 +145,7 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
                                .fs_avl[1] = { IIO_G_TO_M_S_2(122), 0x2 },
                                .fs_avl[2] = { IIO_G_TO_M_S_2(244), 0x3 },
                                .fs_avl[3] = { IIO_G_TO_M_S_2(732), 0x1 },
+                               .fs_len = 4,
                        },
                        [ST_LSM6DSX_ID_GYRO] = {
                                .reg = {
@@ -154,6 +155,7 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
                                .fs_avl[0] = {  IIO_DEGREE_TO_RAD(245), 0x0 },
                                .fs_avl[1] = {  IIO_DEGREE_TO_RAD(500), 0x1 },
                                .fs_avl[2] = { IIO_DEGREE_TO_RAD(2000), 0x3 },
+                               .fs_len = 3,
                        },
                },
        },
@@ -215,6 +217,7 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
                                .fs_avl[1] = { IIO_G_TO_M_S_2(122), 0x2 },
                                .fs_avl[2] = { IIO_G_TO_M_S_2(244), 0x3 },
                                .fs_avl[3] = { IIO_G_TO_M_S_2(488), 0x1 },
+                               .fs_len = 4,
                        },
                        [ST_LSM6DSX_ID_GYRO] = {
                                .reg = {
@@ -225,6 +228,7 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
                                .fs_avl[1] = { IIO_DEGREE_TO_RAD(17500), 0x1 },
                                .fs_avl[2] = { IIO_DEGREE_TO_RAD(35000), 0x2 },
                                .fs_avl[3] = { IIO_DEGREE_TO_RAD(70000), 0x3 },
+                               .fs_len = 4,
                        },
                },
                .decimator = {
@@ -327,6 +331,7 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
                                .fs_avl[1] = { IIO_G_TO_M_S_2(122), 0x2 },
                                .fs_avl[2] = { IIO_G_TO_M_S_2(244), 0x3 },
                                .fs_avl[3] = { IIO_G_TO_M_S_2(488), 0x1 },
+                               .fs_len = 4,
                        },
                        [ST_LSM6DSX_ID_GYRO] = {
                                .reg = {
@@ -337,6 +342,7 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
                                .fs_avl[1] = { IIO_DEGREE_TO_RAD(17500), 0x1 },
                                .fs_avl[2] = { IIO_DEGREE_TO_RAD(35000), 0x2 },
                                .fs_avl[3] = { IIO_DEGREE_TO_RAD(70000), 0x3 },
+                               .fs_len = 4,
                        },
                },
                .decimator = {
@@ -448,6 +454,7 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
                                .fs_avl[1] = { IIO_G_TO_M_S_2(122), 0x2 },
                                .fs_avl[2] = { IIO_G_TO_M_S_2(244), 0x3 },
                                .fs_avl[3] = { IIO_G_TO_M_S_2(488), 0x1 },
+                               .fs_len = 4,
                        },
                        [ST_LSM6DSX_ID_GYRO] = {
                                .reg = {
@@ -458,6 +465,7 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
                                .fs_avl[1] = { IIO_DEGREE_TO_RAD(17500), 0x1 },
                                .fs_avl[2] = { IIO_DEGREE_TO_RAD(35000), 0x2 },
                                .fs_avl[3] = { IIO_DEGREE_TO_RAD(70000), 0x3 },
+                               .fs_len = 4,
                        },
                },
                .decimator = {
@@ -563,6 +571,7 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
                                .fs_avl[1] = { IIO_G_TO_M_S_2(122), 0x2 },
                                .fs_avl[2] = { IIO_G_TO_M_S_2(244), 0x3 },
                                .fs_avl[3] = { IIO_G_TO_M_S_2(488), 0x1 },
+                               .fs_len = 4,
                        },
                        [ST_LSM6DSX_ID_GYRO] = {
                                .reg = {
@@ -573,6 +582,7 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
                                .fs_avl[1] = { IIO_DEGREE_TO_RAD(17500), 0x1 },
                                .fs_avl[2] = { IIO_DEGREE_TO_RAD(35000), 0x2 },
                                .fs_avl[3] = { IIO_DEGREE_TO_RAD(70000), 0x3 },
+                               .fs_len = 4,
                        },
                },
                .batch = {
@@ -693,6 +703,7 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
                                .fs_avl[1] = { IIO_G_TO_M_S_2(122), 0x2 },
                                .fs_avl[2] = { IIO_G_TO_M_S_2(244), 0x3 },
                                .fs_avl[3] = { IIO_G_TO_M_S_2(488), 0x1 },
+                               .fs_len = 4,
                        },
                        [ST_LSM6DSX_ID_GYRO] = {
                                .reg = {
@@ -703,6 +714,7 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
                                .fs_avl[1] = { IIO_DEGREE_TO_RAD(17500), 0x1 },
                                .fs_avl[2] = { IIO_DEGREE_TO_RAD(35000), 0x2 },
                                .fs_avl[3] = { IIO_DEGREE_TO_RAD(70000), 0x3 },
+                               .fs_len = 4,
                        },
                },
                .batch = {
@@ -800,6 +812,7 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
                                .fs_avl[1] = { IIO_G_TO_M_S_2(122), 0x2 },
                                .fs_avl[2] = { IIO_G_TO_M_S_2(244), 0x3 },
                                .fs_avl[3] = { IIO_G_TO_M_S_2(488), 0x1 },
+                               .fs_len = 4,
                        },
                        [ST_LSM6DSX_ID_GYRO] = {
                                .reg = {
@@ -810,6 +823,7 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
                                .fs_avl[1] = { IIO_DEGREE_TO_RAD(17500), 0x1 },
                                .fs_avl[2] = { IIO_DEGREE_TO_RAD(35000), 0x2 },
                                .fs_avl[3] = { IIO_DEGREE_TO_RAD(70000), 0x3 },
+                               .fs_len = 4,
                        },
                },
                .batch = {
@@ -933,11 +947,12 @@ static int st_lsm6dsx_set_full_scale(struct st_lsm6dsx_sensor *sensor,
        int i, err;
 
        fs_table = &sensor->hw->settings->fs_table[sensor->id];
-       for (i = 0; i < ST_LSM6DSX_FS_LIST_SIZE; i++)
+       for (i = 0; i < fs_table->fs_len; i++) {
                if (fs_table->fs_avl[i].gain == gain)
                        break;
+       }
 
-       if (i == ST_LSM6DSX_FS_LIST_SIZE)
+       if (i == fs_table->fs_len)
                return -EINVAL;
 
        data = ST_LSM6DSX_SHIFT_VAL(fs_table->fs_avl[i].val,
@@ -1196,18 +1211,13 @@ static ssize_t st_lsm6dsx_sysfs_scale_avail(struct device *dev,
 {
        struct st_lsm6dsx_sensor *sensor = iio_priv(dev_get_drvdata(dev));
        const struct st_lsm6dsx_fs_table_entry *fs_table;
-       enum st_lsm6dsx_sensor_id id = sensor->id;
        struct st_lsm6dsx_hw *hw = sensor->hw;
        int i, len = 0;
 
-       fs_table = &hw->settings->fs_table[id];
-       for (i = 0; i < ST_LSM6DSX_FS_LIST_SIZE; i++) {
-               if (!fs_table->fs_avl[i].gain)
-                       break;
-
+       fs_table = &hw->settings->fs_table[sensor->id];
+       for (i = 0; i < fs_table->fs_len; i++)
                len += scnprintf(buf + len, PAGE_SIZE - len, "0.%06u ",
                                 fs_table->fs_avl[i].gain);
-       }
        buf[len - 1] = '\n';
 
        return len;
index 66fbcd94642d42237476d537e2c5e563556be004..ea472cf6db7ba53ff180c01e338790764c70c91a 100644 (file)
@@ -61,6 +61,7 @@ static const struct st_lsm6dsx_ext_dev_settings st_lsm6dsx_ext_dev_table[] = {
                                .gain = 1500,
                                .val = 0x0,
                        }, /* 1500 uG/LSB */
+                       .fs_len = 1,
                },
                .temp_comp = {
                        .addr = 0x60,
@@ -92,9 +93,11 @@ static const struct st_lsm6dsx_ext_dev_settings st_lsm6dsx_ext_dev_table[] = {
 static void st_lsm6dsx_shub_wait_complete(struct st_lsm6dsx_hw *hw)
 {
        struct st_lsm6dsx_sensor *sensor;
+       u16 odr;
 
        sensor = iio_priv(hw->iio_devs[ST_LSM6DSX_ID_ACC]);
-       msleep((2000U / sensor->odr) + 1);
+       odr = (hw->enable_mask & BIT(ST_LSM6DSX_ID_ACC)) ? sensor->odr : 13;
+       msleep((2000U / odr) + 1);
 }
 
 /**
@@ -555,13 +558,9 @@ static ssize_t st_lsm6dsx_shub_scale_avail(struct device *dev,
        int i, len = 0;
 
        settings = sensor->ext_info.settings;
-       for (i = 0; i < ST_LSM6DSX_FS_LIST_SIZE; i++) {
-               u16 val = settings->fs_table.fs_avl[i].gain;
-
-               if (val > 0)
-                       len += scnprintf(buf + len, PAGE_SIZE - len, "0.%06u ",
-                                        val);
-       }
+       for (i = 0; i < settings->fs_table.fs_len; i++)
+               len += scnprintf(buf + len, PAGE_SIZE - len, "0.%06u ",
+                                settings->fs_table.fs_avl[i].gain);
        buf[len - 1] = '\n';
 
        return len;
index 08d7e1ef21861b56ff3415b86da7ab762f42dd1e..4a1a883dc0612b7c18493e7d79dcb2759c2155d2 100644 (file)
@@ -314,6 +314,7 @@ config MAX44009
 config NOA1305
        tristate "ON Semiconductor NOA1305 ambient light sensor"
        depends on I2C
+       select REGMAP_I2C
        help
         Say Y here if you want to build support for the ON Semiconductor
         NOA1305 ambient light sensor.
index e666879007d2c9180bfb076dee01e36cd58b9329..92004a2563ea80b910733b657bf10b9be206537f 100644 (file)
@@ -686,6 +686,7 @@ static irqreturn_t opt3001_irq(int irq, void *_iio)
        struct iio_dev *iio = _iio;
        struct opt3001 *opt = iio_priv(iio);
        int ret;
+       bool wake_result_ready_queue = false;
 
        if (!opt->ok_to_ignore_lock)
                mutex_lock(&opt->lock);
@@ -720,13 +721,16 @@ static irqreturn_t opt3001_irq(int irq, void *_iio)
                }
                opt->result = ret;
                opt->result_ready = true;
-               wake_up(&opt->result_ready_queue);
+               wake_result_ready_queue = true;
        }
 
 out:
        if (!opt->ok_to_ignore_lock)
                mutex_unlock(&opt->lock);
 
+       if (wake_result_ready_queue)
+               wake_up(&opt->result_ready_queue);
+
        return IRQ_HANDLED;
 }
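The handler above now records the conversion result and a local flag while opt->lock is held and issues the wake_up() only after dropping the lock, so the woken reader never wakes up just to block again on, or race with, the mutex the interrupt path is still holding. Stripped to its essentials (names hypothetical):

	bool wake = false;

	mutex_lock(&st->lock);
	st->result = raw;
	st->result_ready = true;
	wake = true;
	mutex_unlock(&st->lock);

	if (wake)
		wake_up(&st->result_ready_queue);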
 
index 51421ac3251771b3f46ebdd1af3c06967f26cc81..16dacea9eadfa540afd57c7889339fc03384f639 100644 (file)
@@ -398,19 +398,23 @@ static int vcnl4000_probe(struct i2c_client *client,
 static const struct of_device_id vcnl_4000_of_match[] = {
        {
                .compatible = "vishay,vcnl4000",
-               .data = "VCNL4000",
+               .data = (void *)VCNL4000,
        },
        {
                .compatible = "vishay,vcnl4010",
-               .data = "VCNL4010",
+               .data = (void *)VCNL4010,
        },
        {
-               .compatible = "vishay,vcnl4010",
-               .data = "VCNL4020",
+               .compatible = "vishay,vcnl4020",
+               .data = (void *)VCNL4010,
+       },
+       {
+               .compatible = "vishay,vcnl4040",
+               .data = (void *)VCNL4040,
        },
        {
                .compatible = "vishay,vcnl4200",
-               .data = "VCNL4200",
+               .data = (void *)VCNL4200,
        },
        {},
 };
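The of_device_id table now carries the enum device id directly in .data (cast to a pointer) instead of a model-name string, fixes the entry that duplicated the vcnl4010 compatible, and adds the missing vcnl4040 one. Reading the value back in probe is typically a one-liner; a hedged sketch of the usual idiom, not necessarily what this driver does:

	/* assuming <linux/of_device.h> */
	unsigned long dev_id = (uintptr_t)of_device_get_match_data(&client->dev);
	/* dev_id is one of VCNL4000, VCNL4010, VCNL4040, VCNL4200 */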
index da10e6ccb43cd033c0d7fd4f148b04793ff07e08..5920c0085d35b8de1a0f606382507c18476b23a9 100644 (file)
@@ -4399,6 +4399,7 @@ error2:
 error1:
        port_modify.set_port_cap_mask = 0;
        port_modify.clr_port_cap_mask = IB_PORT_CM_SUP;
+       kfree(port);
        while (--i) {
                if (!rdma_cap_ib_cm(ib_device, i))
                        continue;
@@ -4407,6 +4408,7 @@ error1:
                ib_modify_port(ib_device, port->port_num, 0, &port_modify);
                ib_unregister_mad_agent(port->mad_agent);
                cm_remove_port_fs(port);
+               kfree(port);
        }
 free:
        kfree(cm_dev);
@@ -4460,6 +4462,7 @@ static void cm_remove_one(struct ib_device *ib_device, void *client_data)
                spin_unlock_irq(&cm.state_lock);
                ib_unregister_mad_agent(cur_mad_agent);
                cm_remove_port_fs(port);
+               kfree(port);
        }
 
        kfree(cm_dev);
index 0e3cf34619992c7fa016040aa8ffd74c0145b5bd..d78f67623f24ecf68b345709dc16e7f5689dbf28 100644 (file)
@@ -2396,9 +2396,10 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
                conn_id->cm_id.iw = NULL;
                cma_exch(conn_id, RDMA_CM_DESTROYING);
                mutex_unlock(&conn_id->handler_mutex);
+               mutex_unlock(&listen_id->handler_mutex);
                cma_deref_id(conn_id);
                rdma_destroy_id(&conn_id->id);
-               goto out;
+               return ret;
        }
 
        mutex_unlock(&conn_id->handler_mutex);
index 3a8b0911c3bc16193032dcf1c678f18ded9d9f70..9d07378b5b4230f6e5f1568f3e6b428e95f7b98a 100644 (file)
@@ -199,6 +199,7 @@ void ib_mad_cleanup(void);
 int ib_sa_init(void);
 void ib_sa_cleanup(void);
 
+void rdma_nl_init(void);
 void rdma_nl_exit(void);
 
 int ib_nl_handle_resolve_resp(struct sk_buff *skb,
index 99c4a55545cfba00812824c87ac56c4b0a2acd20..50a92442c4f7c15ebc0708494cca898d3707c1cb 100644 (file)
@@ -1987,8 +1987,6 @@ static int iw_query_port(struct ib_device *device,
        if (!netdev)
                return -ENODEV;
 
-       dev_put(netdev);
-
        port_attr->max_mtu = IB_MTU_4096;
        port_attr->active_mtu = ib_mtu_int_to_enum(netdev->mtu);
 
@@ -1996,19 +1994,22 @@ static int iw_query_port(struct ib_device *device,
                port_attr->state = IB_PORT_DOWN;
                port_attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;
        } else {
-               inetdev = in_dev_get(netdev);
+               rcu_read_lock();
+               inetdev = __in_dev_get_rcu(netdev);
 
                if (inetdev && inetdev->ifa_list) {
                        port_attr->state = IB_PORT_ACTIVE;
                        port_attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
-                       in_dev_put(inetdev);
                } else {
                        port_attr->state = IB_PORT_INIT;
                        port_attr->phys_state =
                                IB_PORT_PHYS_STATE_PORT_CONFIGURATION_TRAINING;
                }
+
+               rcu_read_unlock();
        }
 
+       dev_put(netdev);
        err = device->ops.query_port(device, port_num, port_attr);
        if (err)
                return err;
@@ -2715,6 +2716,8 @@ static int __init ib_core_init(void)
                goto err_comp_unbound;
        }
 
+       rdma_nl_init();
+
        ret = addr_init();
        if (ret) {
                pr_warn("Could't init IB address resolution\n");
index 72141c5b7c95d8701d66272fa0a697465f2103a8..ade71823370f3335566079599a76dde871cc9333 100644 (file)
@@ -372,6 +372,7 @@ EXPORT_SYMBOL(iw_cm_disconnect);
 static void destroy_cm_id(struct iw_cm_id *cm_id)
 {
        struct iwcm_id_private *cm_id_priv;
+       struct ib_qp *qp;
        unsigned long flags;
 
        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
@@ -389,6 +390,9 @@ static void destroy_cm_id(struct iw_cm_id *cm_id)
        set_bit(IWCM_F_DROP_EVENTS, &cm_id_priv->flags);
 
        spin_lock_irqsave(&cm_id_priv->lock, flags);
+       qp = cm_id_priv->qp;
+       cm_id_priv->qp = NULL;
+
        switch (cm_id_priv->state) {
        case IW_CM_STATE_LISTEN:
                cm_id_priv->state = IW_CM_STATE_DESTROYING;
@@ -401,7 +405,7 @@ static void destroy_cm_id(struct iw_cm_id *cm_id)
                cm_id_priv->state = IW_CM_STATE_DESTROYING;
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                /* Abrupt close of the connection */
-               (void)iwcm_modify_qp_err(cm_id_priv->qp);
+               (void)iwcm_modify_qp_err(qp);
                spin_lock_irqsave(&cm_id_priv->lock, flags);
                break;
        case IW_CM_STATE_IDLE:
@@ -426,11 +430,9 @@ static void destroy_cm_id(struct iw_cm_id *cm_id)
                BUG();
                break;
        }
-       if (cm_id_priv->qp) {
-               cm_id_priv->id.device->ops.iw_rem_ref(cm_id_priv->qp);
-               cm_id_priv->qp = NULL;
-       }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+       if (qp)
+               cm_id_priv->id.device->ops.iw_rem_ref(qp);
 
        if (cm_id->mapped) {
                iwpm_remove_mapinfo(&cm_id->local_addr, &cm_id->m_local_addr);
@@ -671,11 +673,11 @@ int iw_cm_accept(struct iw_cm_id *cm_id,
                BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_RECV);
                cm_id_priv->state = IW_CM_STATE_IDLE;
                spin_lock_irqsave(&cm_id_priv->lock, flags);
-               if (cm_id_priv->qp) {
-                       cm_id->device->ops.iw_rem_ref(qp);
-                       cm_id_priv->qp = NULL;
-               }
+               qp = cm_id_priv->qp;
+               cm_id_priv->qp = NULL;
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+               if (qp)
+                       cm_id->device->ops.iw_rem_ref(qp);
                clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
                wake_up_all(&cm_id_priv->connect_wait);
        }
@@ -696,7 +698,7 @@ int iw_cm_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
        struct iwcm_id_private *cm_id_priv;
        int ret;
        unsigned long flags;
-       struct ib_qp *qp;
+       struct ib_qp *qp = NULL;
 
        cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
 
@@ -730,13 +732,13 @@ int iw_cm_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
                return 0;       /* success */
 
        spin_lock_irqsave(&cm_id_priv->lock, flags);
-       if (cm_id_priv->qp) {
-               cm_id->device->ops.iw_rem_ref(qp);
-               cm_id_priv->qp = NULL;
-       }
+       qp = cm_id_priv->qp;
+       cm_id_priv->qp = NULL;
        cm_id_priv->state = IW_CM_STATE_IDLE;
 err:
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+       if (qp)
+               cm_id->device->ops.iw_rem_ref(qp);
        clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
        wake_up_all(&cm_id_priv->connect_wait);
        return ret;
@@ -878,6 +880,7 @@ static int cm_conn_est_handler(struct iwcm_id_private *cm_id_priv,
 static int cm_conn_rep_handler(struct iwcm_id_private *cm_id_priv,
                               struct iw_cm_event *iw_event)
 {
+       struct ib_qp *qp = NULL;
        unsigned long flags;
        int ret;
 
@@ -896,11 +899,13 @@ static int cm_conn_rep_handler(struct iwcm_id_private *cm_id_priv,
                cm_id_priv->state = IW_CM_STATE_ESTABLISHED;
        } else {
                /* REJECTED or RESET */
-               cm_id_priv->id.device->ops.iw_rem_ref(cm_id_priv->qp);
+               qp = cm_id_priv->qp;
                cm_id_priv->qp = NULL;
                cm_id_priv->state = IW_CM_STATE_IDLE;
        }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+       if (qp)
+               cm_id_priv->id.device->ops.iw_rem_ref(qp);
        ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);
 
        if (iw_event->private_data_len)
@@ -942,21 +947,18 @@ static void cm_disconnect_handler(struct iwcm_id_private *cm_id_priv,
 static int cm_close_handler(struct iwcm_id_private *cm_id_priv,
                                  struct iw_cm_event *iw_event)
 {
+       struct ib_qp *qp;
        unsigned long flags;
-       int ret = 0;
+       int ret = 0, notify_event = 0;
        spin_lock_irqsave(&cm_id_priv->lock, flags);
+       qp = cm_id_priv->qp;
+       cm_id_priv->qp = NULL;
 
-       if (cm_id_priv->qp) {
-               cm_id_priv->id.device->ops.iw_rem_ref(cm_id_priv->qp);
-               cm_id_priv->qp = NULL;
-       }
        switch (cm_id_priv->state) {
        case IW_CM_STATE_ESTABLISHED:
        case IW_CM_STATE_CLOSING:
                cm_id_priv->state = IW_CM_STATE_IDLE;
-               spin_unlock_irqrestore(&cm_id_priv->lock, flags);
-               ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);
-               spin_lock_irqsave(&cm_id_priv->lock, flags);
+               notify_event = 1;
                break;
        case IW_CM_STATE_DESTROYING:
                break;
@@ -965,6 +967,10 @@ static int cm_close_handler(struct iwcm_id_private *cm_id_priv,
        }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
 
+       if (qp)
+               cm_id_priv->id.device->ops.iw_rem_ref(qp);
+       if (notify_event)
+               ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);
        return ret;
 }
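Every hunk in this file follows the same shape: snapshot cm_id_priv->qp under the spinlock, NULL the shared field inside the critical section, and drop the reference only after unlocking, so the provider's iw_rem_ref() callback is never invoked with the spinlock held and the reference cannot be dropped twice. The common pattern, lifted out of the hunks above:

	struct ib_qp *qp;
	unsigned long flags;

	spin_lock_irqsave(&cm_id_priv->lock, flags);
	qp = cm_id_priv->qp;
	cm_id_priv->qp = NULL;
	/* ...state transition... */
	spin_unlock_irqrestore(&cm_id_priv->lock, flags);

	if (qp)
		cm_id_priv->id.device->ops.iw_rem_ref(qp);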
 
index 81dbd5f41beda241c1f815077fcad91a7f019f26..8cd31ef25eff20e3f54e5ab7f8f72243748062e2 100644 (file)
 #include <linux/module.h>
 #include "core_priv.h"
 
-static DEFINE_MUTEX(rdma_nl_mutex);
 static struct {
-       const struct rdma_nl_cbs   *cb_table;
+       const struct rdma_nl_cbs *cb_table;
+       /* Synchronizes between ongoing netlink commands and netlink client
+        * unregistration.
+        */
+       struct rw_semaphore sem;
 } rdma_nl_types[RDMA_NL_NUM_CLIENTS];
 
 bool rdma_nl_chk_listeners(unsigned int group)
@@ -75,70 +78,53 @@ static bool is_nl_msg_valid(unsigned int type, unsigned int op)
        return (op < max_num_ops[type]) ? true : false;
 }
 
-static bool
-is_nl_valid(const struct sk_buff *skb, unsigned int type, unsigned int op)
+static const struct rdma_nl_cbs *
+get_cb_table(const struct sk_buff *skb, unsigned int type, unsigned int op)
 {
        const struct rdma_nl_cbs *cb_table;
 
-       if (!is_nl_msg_valid(type, op))
-               return false;
-
        /*
         * Currently only the NLDEV client supports netlink commands in
         * a non-init_net net namespace.
         */
        if (sock_net(skb->sk) != &init_net && type != RDMA_NL_NLDEV)
-               return false;
+               return NULL;
 
-       if (!rdma_nl_types[type].cb_table) {
-               mutex_unlock(&rdma_nl_mutex);
-               request_module("rdma-netlink-subsys-%d", type);
-               mutex_lock(&rdma_nl_mutex);
-       }
+       cb_table = READ_ONCE(rdma_nl_types[type].cb_table);
+       if (!cb_table) {
+               /*
+                * Didn't get a valid reference to the table; attempt module
+                * load once.
+                */
+               up_read(&rdma_nl_types[type].sem);
 
-       cb_table = rdma_nl_types[type].cb_table;
+               request_module("rdma-netlink-subsys-%d", type);
 
+               down_read(&rdma_nl_types[type].sem);
+               cb_table = READ_ONCE(rdma_nl_types[type].cb_table);
+       }
        if (!cb_table || (!cb_table[op].dump && !cb_table[op].doit))
-               return false;
-       return true;
+               return NULL;
+       return cb_table;
 }
 
 void rdma_nl_register(unsigned int index,
                      const struct rdma_nl_cbs cb_table[])
 {
-       mutex_lock(&rdma_nl_mutex);
-       if (!is_nl_msg_valid(index, 0)) {
-               /*
-                * All clients are not interesting in success/failure of
-                * this call. They want to see the print to error log and
-                * continue their initialization. Print warning for them,
-                * because it is programmer's error to be here.
-                */
-               mutex_unlock(&rdma_nl_mutex);
-               WARN(true,
-                    "The not-valid %u index was supplied to RDMA netlink\n",
-                    index);
+       if (WARN_ON(!is_nl_msg_valid(index, 0)) ||
+           WARN_ON(READ_ONCE(rdma_nl_types[index].cb_table)))
                return;
-       }
-
-       if (rdma_nl_types[index].cb_table) {
-               mutex_unlock(&rdma_nl_mutex);
-               WARN(true,
-                    "The %u index is already registered in RDMA netlink\n",
-                    index);
-               return;
-       }
 
-       rdma_nl_types[index].cb_table = cb_table;
-       mutex_unlock(&rdma_nl_mutex);
+       /* Pairs with the READ_ONCE in get_cb_table() */
+       smp_store_release(&rdma_nl_types[index].cb_table, cb_table);
 }
 EXPORT_SYMBOL(rdma_nl_register);
 
 void rdma_nl_unregister(unsigned int index)
 {
-       mutex_lock(&rdma_nl_mutex);
+       down_write(&rdma_nl_types[index].sem);
        rdma_nl_types[index].cb_table = NULL;
-       mutex_unlock(&rdma_nl_mutex);
+       up_write(&rdma_nl_types[index].sem);
 }
 EXPORT_SYMBOL(rdma_nl_unregister);
 
@@ -170,15 +156,21 @@ static int rdma_nl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
        unsigned int index = RDMA_NL_GET_CLIENT(type);
        unsigned int op = RDMA_NL_GET_OP(type);
        const struct rdma_nl_cbs *cb_table;
+       int err = -EINVAL;
 
-       if (!is_nl_valid(skb, index, op))
+       if (!is_nl_msg_valid(index, op))
                return -EINVAL;
 
-       cb_table = rdma_nl_types[index].cb_table;
+       down_read(&rdma_nl_types[index].sem);
+       cb_table = get_cb_table(skb, index, op);
+       if (!cb_table)
+               goto done;
 
        if ((cb_table[op].flags & RDMA_NL_ADMIN_PERM) &&
-           !netlink_capable(skb, CAP_NET_ADMIN))
-               return -EPERM;
+           !netlink_capable(skb, CAP_NET_ADMIN)) {
+               err = -EPERM;
+               goto done;
+       }
 
        /*
         * LS responses overload the 0x100 (NLM_F_ROOT) flag.  Don't
@@ -186,8 +178,8 @@ static int rdma_nl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
         */
        if (index == RDMA_NL_LS) {
                if (cb_table[op].doit)
-                       return cb_table[op].doit(skb, nlh, extack);
-               return -EINVAL;
+                       err = cb_table[op].doit(skb, nlh, extack);
+               goto done;
        }
        /* FIXME: Convert IWCM to properly handle doit callbacks */
        if ((nlh->nlmsg_flags & NLM_F_DUMP) || index == RDMA_NL_IWCM) {
@@ -195,14 +187,15 @@ static int rdma_nl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
                        .dump = cb_table[op].dump,
                };
                if (c.dump)
-                       return netlink_dump_start(skb->sk, skb, nlh, &c);
-               return -EINVAL;
+                       err = netlink_dump_start(skb->sk, skb, nlh, &c);
+               goto done;
        }
 
        if (cb_table[op].doit)
-               return cb_table[op].doit(skb, nlh, extack);
-
-       return 0;
+               err = cb_table[op].doit(skb, nlh, extack);
+done:
+       up_read(&rdma_nl_types[index].sem);
+       return err;
 }
 
 /*
@@ -263,9 +256,7 @@ skip:
 
 static void rdma_nl_rcv(struct sk_buff *skb)
 {
-       mutex_lock(&rdma_nl_mutex);
        rdma_nl_rcv_skb(skb, &rdma_nl_rcv_msg);
-       mutex_unlock(&rdma_nl_mutex);
 }
 
 int rdma_nl_unicast(struct net *net, struct sk_buff *skb, u32 pid)
@@ -297,6 +288,14 @@ int rdma_nl_multicast(struct net *net, struct sk_buff *skb,
 }
 EXPORT_SYMBOL(rdma_nl_multicast);
 
+void rdma_nl_init(void)
+{
+       int idx;
+
+       for (idx = 0; idx < RDMA_NL_NUM_CLIENTS; idx++)
+               init_rwsem(&rdma_nl_types[idx].sem);
+}
+
 void rdma_nl_exit(void)
 {
        int idx;
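Editor's note: the netlink.c rework above replaces the single rdma_nl_mutex with a per-client rw_semaphore plus a published cb_table pointer — registration is a release store, lookup is a READ_ONCE under the read side, and unregistration takes the write side so in-flight commands drain first. A rough userspace analogue follows; pthread_rwlock_t stands in for the rw_semaphore, C11 atomics for READ_ONCE/smp_store_release, and dispatch(), hello() and the table layout are invented for the sketch.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NUM_CLIENTS 4

struct cbs { int (*doit)(void); };

static struct {
	_Atomic(const struct cbs *) cb_table;	/* published table, or NULL */
	pthread_rwlock_t sem;			/* commands vs. unregistration */
} clients[NUM_CLIENTS];

static void client_register(unsigned int idx, const struct cbs *table)
{
	/* release: the table is fully initialised before it becomes visible */
	atomic_store_explicit(&clients[idx].cb_table, table,
			      memory_order_release);
}

static void client_unregister(unsigned int idx)
{
	/* write side: waits for every in-flight command to drop the read side */
	pthread_rwlock_wrlock(&clients[idx].sem);
	atomic_store_explicit(&clients[idx].cb_table, NULL,
			      memory_order_relaxed);
	pthread_rwlock_unlock(&clients[idx].sem);
}

static int dispatch(unsigned int idx)
{
	const struct cbs *t;
	int err = -1;

	pthread_rwlock_rdlock(&clients[idx].sem);
	t = atomic_load_explicit(&clients[idx].cb_table, memory_order_acquire);
	if (t && t->doit)
		err = t->doit();	/* the table cannot vanish while held */
	pthread_rwlock_unlock(&clients[idx].sem);
	return err;
}

static int hello(void) { puts("hello"); return 0; }

int main(void)
{
	static const struct cbs table = { hello };
	unsigned int i;

	for (i = 0; i < NUM_CLIENTS; i++)
		pthread_rwlock_init(&clients[i].sem, NULL);
	client_register(1, &table);
	dispatch(1);			/* runs hello() */
	client_unregister(1);
	dispatch(1);			/* now fails cleanly with -1 */
	return 0;
}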
index 7a7474000100c1dcd30462da3da775d4e3f4288c..c03af08b80e776688cfa3d4e303d37b1b992a7af 100644 (file)
@@ -778,7 +778,7 @@ static int fill_res_counter_entry(struct sk_buff *msg, bool has_cap_net_admin,
                container_of(res, struct rdma_counter, res);
 
        if (port && port != counter->port)
-               return 0;
+               return -EAGAIN;
 
        /* Dump it even if the query failed */
        rdma_counter_query_stats(counter);
@@ -1230,7 +1230,7 @@ static int res_get_common_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
        msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!msg) {
                ret = -ENOMEM;
-               goto err;
+               goto err_get;
        }
 
        nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
@@ -1787,10 +1787,6 @@ static int nldev_stat_del_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
 
        cntn = nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID]);
        qpn = nla_get_u32(tb[RDMA_NLDEV_ATTR_RES_LQPN]);
-       ret = rdma_counter_unbind_qpn(device, port, qpn, cntn);
-       if (ret)
-               goto err_unbind;
-
        if (fill_nldev_handle(msg, device) ||
            nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port) ||
            nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_COUNTER_ID, cntn) ||
@@ -1799,13 +1795,15 @@ static int nldev_stat_del_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
                goto err_fill;
        }
 
+       ret = rdma_counter_unbind_qpn(device, port, qpn, cntn);
+       if (ret)
+               goto err_fill;
+
        nlmsg_end(msg, nlh);
        ib_device_put(device);
        return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
 
 err_fill:
-       rdma_counter_bind_qpn(device, port, qpn, cntn);
-err_unbind:
        nlmsg_free(msg);
 err:
        ib_device_put(device);
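Editor's note: the nldev_stat_del_doit() change above is an ordering fix — the counter is unbound only after every step that can fail (message allocation and attribute filling) has succeeded, so the error path simply frees the message instead of trying to re-bind the counter. A small sketch of the same shape follows, with made-up build_reply()/unbind_counter() helpers rather than the real nldev functions.

#include <stdio.h>
#include <stdlib.h>

/* building the reply can fail; the unbind is hard to undo once done */
static int build_reply(char **msg)
{
	*msg = malloc(64);
	return *msg ? 0 : -1;
}

static int unbind_counter(int id)
{
	printf("unbound counter %d\n", id);
	return 0;
}

static int stat_del(int id)
{
	char *msg;
	int err;

	err = build_reply(&msg);	/* all fallible work first ... */
	if (err)
		return err;

	err = unbind_counter(id);	/* ... the irreversible step last */
	if (err) {
		free(msg);
		return err;
	}

	/* send msg to userspace here */
	free(msg);
	return 0;
}

int main(void)
{
	return stat_del(7);
}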
index 1ab423b19f778f6e5d9740e9d61b7cd54c2a7fc2..6eb6d2717ca5b2b392acc723d82ca5efcac1d5ce 100644 (file)
@@ -426,7 +426,7 @@ int ib_create_qp_security(struct ib_qp *qp, struct ib_device *dev)
        int ret;
 
        rdma_for_each_port (dev, i) {
-               is_ib = rdma_protocol_ib(dev, i++);
+               is_ib = rdma_protocol_ib(dev, i);
                if (is_ib)
                        break;
        }
index f67a30fda1ed9a332cd5aedcf417ebd00c1019a9..163ff7ba92b7f136fda5c751357aa813f8d66733 100644 (file)
@@ -451,8 +451,10 @@ void ib_umem_odp_release(struct ib_umem_odp *umem_odp)
         * that the hardware will not attempt to access the MR any more.
         */
        if (!umem_odp->is_implicit_odp) {
+               mutex_lock(&umem_odp->umem_mutex);
                ib_umem_odp_unmap_dma_pages(umem_odp, ib_umem_start(umem_odp),
                                            ib_umem_end(umem_odp));
+               mutex_unlock(&umem_odp->umem_mutex);
                kvfree(umem_odp->dma_list);
                kvfree(umem_odp->page_list);
        }
@@ -719,6 +721,8 @@ void ib_umem_odp_unmap_dma_pages(struct ib_umem_odp *umem_odp, u64 virt,
        u64 addr;
        struct ib_device *dev = umem_odp->umem.ibdev;
 
+       lockdep_assert_held(&umem_odp->umem_mutex);
+
        virt = max_t(u64, virt, ib_umem_start(umem_odp));
        bound = min_t(u64, bound, ib_umem_end(umem_odp));
        /* Note that during the run of this function, the
@@ -726,7 +730,6 @@ void ib_umem_odp_unmap_dma_pages(struct ib_umem_odp *umem_odp, u64 virt,
         * faults from completion. We might be racing with other
         * invalidations, so we must make sure we free each page only
         * once. */
-       mutex_lock(&umem_odp->umem_mutex);
        for (addr = virt; addr < bound; addr += BIT(umem_odp->page_shift)) {
                idx = (addr - ib_umem_start(umem_odp)) >> umem_odp->page_shift;
                if (umem_odp->page_list[idx]) {
@@ -757,7 +760,6 @@ void ib_umem_odp_unmap_dma_pages(struct ib_umem_odp *umem_odp, u64 virt,
                        umem_odp->npages--;
                }
        }
-       mutex_unlock(&umem_odp->umem_mutex);
 }
 EXPORT_SYMBOL(ib_umem_odp_unmap_dma_pages);
 
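Editor's note: the umem_odp.c hunks above pull the umem_mutex out of ib_umem_odp_unmap_dma_pages() and make the callers take it, with lockdep_assert_held() documenting the new contract; each caller can then decide how much work sits inside the critical section. A toy userspace version of the caller-locks convention follows, with assert() on a locked flag standing in for lockdep — an illustration only, not the kernel API.

#include <assert.h>
#include <pthread.h>

struct range_map {
	pthread_mutex_t lock;
	int locked;		/* poor man's lockdep state */
	/* ... page tracking elided ... */
};

/* Caller must hold map->lock: unmapping must not race with fault handling. */
static void unmap_range(struct range_map *m, unsigned long start,
			unsigned long end)
{
	assert(m->locked);	/* ~ lockdep_assert_held(&umem_mutex) */
	(void)start;
	(void)end;
	/* walk [start, end) and tear the mappings down ... */
}

static void release_map(struct range_map *m)
{
	pthread_mutex_lock(&m->lock);
	m->locked = 1;
	unmap_range(m, 0, ~0UL);	/* whole range, under the caller's lock */
	m->locked = 0;
	pthread_mutex_unlock(&m->lock);
	/* free the bookkeeping only once nothing can fault on it any more */
}

int main(void)
{
	struct range_map m = { .locked = 0 };

	pthread_mutex_init(&m.lock, NULL);
	release_map(&m);
	return 0;
}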
index 1e5aeb39f774df6a1bc386ea448b89763ee1705f..63f7f7db590286a6e6bdc5d4d5cd037bb841294b 100644 (file)
@@ -98,7 +98,7 @@ ib_uverbs_init_udata_buf_or_null(struct ib_udata *udata,
 
 struct ib_uverbs_device {
        atomic_t                                refcount;
-       int                                     num_comp_vectors;
+       u32                                     num_comp_vectors;
        struct completion                       comp;
        struct device                           dev;
        /* First group for device attributes, NULL terminated array */
index f974b68542245206936e623a6d62d6ef948cecce..35c2841a569e98e73229bc80a99d8f350c00cda7 100644 (file)
@@ -662,16 +662,17 @@ static bool find_gid_index(const union ib_gid *gid,
                           void *context)
 {
        struct find_gid_index_context *ctx = context;
+       u16 vlan_id = 0xffff;
+       int ret;
 
        if (ctx->gid_type != gid_attr->gid_type)
                return false;
 
-       if ((!!(ctx->vlan_id != 0xffff) == !is_vlan_dev(gid_attr->ndev)) ||
-           (is_vlan_dev(gid_attr->ndev) &&
-            vlan_dev_vlan_id(gid_attr->ndev) != ctx->vlan_id))
+       ret = rdma_read_gid_l2_fields(gid_attr, &vlan_id, NULL);
+       if (ret)
                return false;
 
-       return true;
+       return ctx->vlan_id == vlan_id;
 }
 
 static const struct ib_gid_attr *
index e87fc0408470452c4bc5600c127f69848c2c3cb3..347dc242fb8829eab1fca133e61da178cad78081 100644 (file)
@@ -495,7 +495,6 @@ static int _put_ep_safe(struct c4iw_dev *dev, struct sk_buff *skb)
 
        ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *)));
        release_ep_resources(ep);
-       kfree_skb(skb);
        return 0;
 }
 
@@ -506,7 +505,6 @@ static int _put_pass_ep_safe(struct c4iw_dev *dev, struct sk_buff *skb)
        ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *)));
        c4iw_put_ep(&ep->parent_ep->com);
        release_ep_resources(ep);
-       kfree_skb(skb);
        return 0;
 }
 
@@ -2424,20 +2422,6 @@ static int accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
        enum chip_type adapter_type = ep->com.dev->rdev.lldi.adapter_type;
 
        pr_debug("ep %p tid %u\n", ep, ep->hwtid);
-
-       skb_get(skb);
-       rpl = cplhdr(skb);
-       if (!is_t4(adapter_type)) {
-               skb_trim(skb, roundup(sizeof(*rpl5), 16));
-               rpl5 = (void *)rpl;
-               INIT_TP_WR(rpl5, ep->hwtid);
-       } else {
-               skb_trim(skb, sizeof(*rpl));
-               INIT_TP_WR(rpl, ep->hwtid);
-       }
-       OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
-                                                   ep->hwtid));
-
        cxgb_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
                      enable_tcp_timestamps && req->tcpopt.tstamp,
                      (ep->com.remote_addr.ss_family == AF_INET) ? 0 : 1);
@@ -2483,6 +2467,20 @@ static int accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
                if (tcph->ece && tcph->cwr)
                        opt2 |= CCTRL_ECN_V(1);
        }
+
+       skb_get(skb);
+       rpl = cplhdr(skb);
+       if (!is_t4(adapter_type)) {
+               skb_trim(skb, roundup(sizeof(*rpl5), 16));
+               rpl5 = (void *)rpl;
+               INIT_TP_WR(rpl5, ep->hwtid);
+       } else {
+               skb_trim(skb, sizeof(*rpl));
+               INIT_TP_WR(rpl, ep->hwtid);
+       }
+       OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
+                                                   ep->hwtid));
+
        if (CHELSIO_CHIP_VERSION(adapter_type) > CHELSIO_T4) {
                u32 isn = (prandom_u32() & ~7UL) - 1;
                opt2 |= T5_OPT_2_VALID_F;
index a8b9548bd1a260e259d1c31ce5384b2a28820861..599340c1f0b82c61171db4211532461f6a790aa7 100644 (file)
@@ -242,10 +242,13 @@ static void set_ep_sin6_addrs(struct c4iw_ep *ep,
        }
 }
 
-static int dump_qp(struct c4iw_qp *qp, struct c4iw_debugfs_data *qpd)
+static int dump_qp(unsigned long id, struct c4iw_qp *qp,
+                  struct c4iw_debugfs_data *qpd)
 {
        int space;
        int cc;
+       if (id != qp->wq.sq.qid)
+               return 0;
 
        space = qpd->bufsize - qpd->pos - 1;
        if (space == 0)
@@ -350,7 +353,7 @@ static int qp_open(struct inode *inode, struct file *file)
 
        xa_lock_irq(&qpd->devp->qps);
        xa_for_each(&qpd->devp->qps, index, qp)
-               dump_qp(qp, qpd);
+               dump_qp(index, qp, qpd);
        xa_unlock_irq(&qpd->devp->qps);
 
        qpd->buf[qpd->pos++] = 0;
index aa772ee0706f9991ced0a0f6644e15f916d9151c..35c284af574dadaee494f71e5381f0462c6bb3f1 100644 (file)
@@ -275,13 +275,17 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
                           struct sk_buff *skb, struct c4iw_wr_wait *wr_waitp)
 {
        int err;
-       struct fw_ri_tpte tpt;
+       struct fw_ri_tpte *tpt;
        u32 stag_idx;
        static atomic_t key;
 
        if (c4iw_fatal_error(rdev))
                return -EIO;
 
+       tpt = kmalloc(sizeof(*tpt), GFP_KERNEL);
+       if (!tpt)
+               return -ENOMEM;
+
        stag_state = stag_state > 0;
        stag_idx = (*stag) >> 8;
 
@@ -291,6 +295,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
                        mutex_lock(&rdev->stats.lock);
                        rdev->stats.stag.fail++;
                        mutex_unlock(&rdev->stats.lock);
+                       kfree(tpt);
                        return -ENOMEM;
                }
                mutex_lock(&rdev->stats.lock);
@@ -305,28 +310,28 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
 
        /* write TPT entry */
        if (reset_tpt_entry)
-               memset(&tpt, 0, sizeof(tpt));
+               memset(tpt, 0, sizeof(*tpt));
        else {
-               tpt.valid_to_pdid = cpu_to_be32(FW_RI_TPTE_VALID_F |
+               tpt->valid_to_pdid = cpu_to_be32(FW_RI_TPTE_VALID_F |
                        FW_RI_TPTE_STAGKEY_V((*stag & FW_RI_TPTE_STAGKEY_M)) |
                        FW_RI_TPTE_STAGSTATE_V(stag_state) |
                        FW_RI_TPTE_STAGTYPE_V(type) | FW_RI_TPTE_PDID_V(pdid));
-               tpt.locread_to_qpid = cpu_to_be32(FW_RI_TPTE_PERM_V(perm) |
+               tpt->locread_to_qpid = cpu_to_be32(FW_RI_TPTE_PERM_V(perm) |
                        (bind_enabled ? FW_RI_TPTE_MWBINDEN_F : 0) |
                        FW_RI_TPTE_ADDRTYPE_V((zbva ? FW_RI_ZERO_BASED_TO :
                                                      FW_RI_VA_BASED_TO))|
                        FW_RI_TPTE_PS_V(page_size));
-               tpt.nosnoop_pbladdr = !pbl_size ? 0 : cpu_to_be32(
+               tpt->nosnoop_pbladdr = !pbl_size ? 0 : cpu_to_be32(
                        FW_RI_TPTE_PBLADDR_V(PBL_OFF(rdev, pbl_addr)>>3));
-               tpt.len_lo = cpu_to_be32((u32)(len & 0xffffffffUL));
-               tpt.va_hi = cpu_to_be32((u32)(to >> 32));
-               tpt.va_lo_fbo = cpu_to_be32((u32)(to & 0xffffffffUL));
-               tpt.dca_mwbcnt_pstag = cpu_to_be32(0);
-               tpt.len_hi = cpu_to_be32((u32)(len >> 32));
+               tpt->len_lo = cpu_to_be32((u32)(len & 0xffffffffUL));
+               tpt->va_hi = cpu_to_be32((u32)(to >> 32));
+               tpt->va_lo_fbo = cpu_to_be32((u32)(to & 0xffffffffUL));
+               tpt->dca_mwbcnt_pstag = cpu_to_be32(0);
+               tpt->len_hi = cpu_to_be32((u32)(len >> 32));
        }
        err = write_adapter_mem(rdev, stag_idx +
                                (rdev->lldi.vr->stag.start >> 5),
-                               sizeof(tpt), &tpt, skb, wr_waitp);
+                               sizeof(*tpt), tpt, skb, wr_waitp);
 
        if (reset_tpt_entry) {
                c4iw_put_resource(&rdev->resource.tpt_table, stag_idx);
@@ -334,6 +339,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
                rdev->stats.stag.cur -= 32;
                mutex_unlock(&rdev->stats.lock);
        }
+       kfree(tpt);
        return err;
 }
 
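Editor's note: the write_tpt_entry() hunk above moves the firmware TPT descriptor off the kernel stack onto the heap and frees it on every exit path. The same shape in a standalone sketch follows; struct descriptor and send_descriptor() are invented placeholders for fw_ri_tpte and write_adapter_mem().

#include <stdlib.h>
#include <string.h>

struct descriptor {
	unsigned char payload[512];	/* large enough to be unwelcome on the stack */
};

/* invented stand-in for write_adapter_mem() */
static int send_descriptor(const struct descriptor *d)
{
	(void)d;
	return 0;
}

static int write_entry(int reset)
{
	struct descriptor *d;
	int err;

	d = malloc(sizeof(*d));
	if (!d)
		return -1;			/* ~ -ENOMEM */

	if (reset)
		memset(d, 0, sizeof(*d));
	else
		memset(d->payload, 0xab, sizeof(d->payload));	/* fill the fields */

	err = send_descriptor(d);
	free(d);				/* freed on every path */
	return err;
}

int main(void)
{
	return write_entry(0);
}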
index eb9368be28c1df457f948b19ff4ef103c8dac12f..bbcac539777a2f62281963bf988465c6add06d58 100644 (file)
@@ -2737,15 +2737,11 @@ int c4iw_create_srq(struct ib_srq *ib_srq, struct ib_srq_init_attr *attrs,
        if (CHELSIO_CHIP_VERSION(rhp->rdev.lldi.adapter_type) > CHELSIO_T6)
                srq->flags = T4_SRQ_LIMIT_SUPPORT;
 
-       ret = xa_insert_irq(&rhp->qps, srq->wq.qid, srq, GFP_KERNEL);
-       if (ret)
-               goto err_free_queue;
-
        if (udata) {
                srq_key_mm = kmalloc(sizeof(*srq_key_mm), GFP_KERNEL);
                if (!srq_key_mm) {
                        ret = -ENOMEM;
-                       goto err_remove_handle;
+                       goto err_free_queue;
                }
                srq_db_key_mm = kmalloc(sizeof(*srq_db_key_mm), GFP_KERNEL);
                if (!srq_db_key_mm) {
@@ -2789,8 +2785,6 @@ err_free_srq_db_key_mm:
        kfree(srq_db_key_mm);
 err_free_srq_key_mm:
        kfree(srq_key_mm);
-err_remove_handle:
-       xa_erase_irq(&rhp->qps, srq->wq.qid);
 err_free_queue:
        free_srq_queue(srq, ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
                       srq->wr_waitp);
@@ -2813,8 +2807,6 @@ void c4iw_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
        rhp = srq->rhp;
 
        pr_debug("%s id %d\n", __func__, srq->wq.qid);
-
-       xa_erase_irq(&rhp->qps, srq->wq.qid);
        ucontext = rdma_udata_to_drv_context(udata, struct c4iw_ucontext,
                                             ibucontext);
        free_srq_queue(srq, ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
index 2395fd4233a7e729041b2f31fb8639c996fa70ed..c61b6022575e1abe24a4a7bbf67729a0ad140315 100644 (file)
@@ -65,6 +65,7 @@
 #define SDMA_DESCQ_CNT 2048
 #define SDMA_DESC_INTR 64
 #define INVALID_TAIL 0xffff
+#define SDMA_PAD max_t(size_t, MAX_16B_PADDING, sizeof(u32))
 
 static uint sdma_descq_cnt = SDMA_DESCQ_CNT;
 module_param(sdma_descq_cnt, uint, S_IRUGO);
@@ -1296,7 +1297,7 @@ void sdma_clean(struct hfi1_devdata *dd, size_t num_engines)
        struct sdma_engine *sde;
 
        if (dd->sdma_pad_dma) {
-               dma_free_coherent(&dd->pcidev->dev, 4,
+               dma_free_coherent(&dd->pcidev->dev, SDMA_PAD,
                                  (void *)dd->sdma_pad_dma,
                                  dd->sdma_pad_phys);
                dd->sdma_pad_dma = NULL;
@@ -1491,7 +1492,7 @@ int sdma_init(struct hfi1_devdata *dd, u8 port)
        }
 
        /* Allocate memory for pad */
-       dd->sdma_pad_dma = dma_alloc_coherent(&dd->pcidev->dev, sizeof(u32),
+       dd->sdma_pad_dma = dma_alloc_coherent(&dd->pcidev->dev, SDMA_PAD,
                                              &dd->sdma_pad_phys, GFP_KERNEL);
        if (!dd->sdma_pad_dma) {
                dd_dev_err(dd, "failed to allocate SendDMA pad memory\n");
@@ -1526,8 +1527,11 @@ int sdma_init(struct hfi1_devdata *dd, u8 port)
        }
 
        ret = rhashtable_init(tmp_sdma_rht, &sdma_rht_params);
-       if (ret < 0)
+       if (ret < 0) {
+               kfree(tmp_sdma_rht);
                goto bail;
+       }
+
        dd->sdma_rht = tmp_sdma_rht;
 
        dd_dev_info(dd, "SDMA num_sdma: %u\n", dd->num_sdma);
index b4dcc4d29f84e0e10fcd312b97b520fda0507fd8..f21fca3617d5feae86f97213130c84426efb3218 100644 (file)
@@ -2736,11 +2736,6 @@ static bool handle_read_kdeth_eflags(struct hfi1_ctxtdata *rcd,
                                diff = cmp_psn(psn,
                                               flow->flow_state.r_next_psn);
                                if (diff > 0) {
-                                       if (!(qp->r_flags & RVT_R_RDMAR_SEQ))
-                                               restart_tid_rdma_read_req(rcd,
-                                                                         qp,
-                                                                         wqe);
-
                                        /* Drop the packet.*/
                                        goto s_unlock;
                                } else if (diff < 0) {
index 7bff0a1e713d77535a1ba52de3186b082b1e429e..089e201d75500072f527410a6ea8b1499fef19b1 100644 (file)
@@ -147,9 +147,6 @@ static int pio_wait(struct rvt_qp *qp,
 /* Length of buffer to create verbs txreq cache name */
 #define TXREQ_NAME_LEN 24
 
-/* 16B trailing buffer */
-static const u8 trail_buf[MAX_16B_PADDING];
-
 static uint wss_threshold = 80;
 module_param(wss_threshold, uint, S_IRUGO);
 MODULE_PARM_DESC(wss_threshold, "Percentage (1-100) of LLC to use as a threshold for a cacheless copy");
@@ -820,8 +817,8 @@ static int build_verbs_tx_desc(
 
        /* add icrc, lt byte, and padding to flit */
        if (extra_bytes)
-               ret = sdma_txadd_kvaddr(sde->dd, &tx->txreq,
-                                       (void *)trail_buf, extra_bytes);
+               ret = sdma_txadd_daddr(sde->dd, &tx->txreq,
+                                      sde->dd->sdma_pad_phys, extra_bytes);
 
 bail_txadd:
        return ret;
@@ -1089,7 +1086,8 @@ int hfi1_verbs_send_pio(struct rvt_qp *qp, struct hfi1_pkt_state *ps,
                }
                /* add icrc, lt byte, and padding to flit */
                if (extra_bytes)
-                       seg_pio_copy_mid(pbuf, trail_buf, extra_bytes);
+                       seg_pio_copy_mid(pbuf, ppd->dd->sdma_pad_dma,
+                                        extra_bytes);
 
                seg_pio_copy_end(pbuf);
        }
index 7a89d669f8bfa93265228c855d233a48cd5f5cf6..e82567fcdeb7f05c46110a77176d50563ee3e10f 100644 (file)
@@ -5389,9 +5389,9 @@ static void hns_roce_v2_free_eq(struct hns_roce_dev *hr_dev,
                return;
        }
 
-       if (eq->buf_list)
-               dma_free_coherent(hr_dev->dev, buf_chk_sz,
-                                 eq->buf_list->buf, eq->buf_list->map);
+       dma_free_coherent(hr_dev->dev, buf_chk_sz, eq->buf_list->buf,
+                         eq->buf_list->map);
+       kfree(eq->buf_list);
 }
 
 static void hns_roce_config_eqc(struct hns_roce_dev *hr_dev,
index 8056930bbe2c726e20bea2c0f92914f114b2f919..cd9ee1664a69e2f431cd169e8376d9a55a846a59 100644 (file)
@@ -2773,6 +2773,10 @@ int i40iw_register_rdma_device(struct i40iw_device *iwdev)
                return -ENOMEM;
        iwibdev = iwdev->iwibdev;
        rdma_set_device_sysfs_group(&iwibdev->ibdev, &i40iw_attr_group);
+       ret = ib_device_set_netdev(&iwibdev->ibdev, iwdev->netdev, 1);
+       if (ret)
+               goto error;
+
        ret = ib_register_device(&iwibdev->ibdev, "i40iw%d");
        if (ret)
                goto error;
index 59022b7441448f0b5eca7e5f406196e4cfe777d3..d609f4659afb7a93d134528e974467e961847749 100644 (file)
@@ -1298,29 +1298,6 @@ static int devx_handle_mkey_create(struct mlx5_ib_dev *dev,
        return 0;
 }
 
-static void devx_free_indirect_mkey(struct rcu_head *rcu)
-{
-       kfree(container_of(rcu, struct devx_obj, devx_mr.rcu));
-}
-
-/* This function to delete from the radix tree needs to be called before
- * destroying the underlying mkey. Otherwise a race might occur in case that
- * other thread will get the same mkey before this one will be deleted,
- * in that case it will fail via inserting to the tree its own data.
- *
- * Note:
- * An error in the destroy is not expected unless there is some other indirect
- * mkey which points to this one. In a kernel cleanup flow it will be just
- * destroyed in the iterative destruction call. In a user flow, in case
- * the application didn't close in the expected order it's its own problem,
- * the mkey won't be part of the tree, in both cases the kernel is safe.
- */
-static void devx_cleanup_mkey(struct devx_obj *obj)
-{
-       xa_erase(&obj->ib_dev->mdev->priv.mkey_table,
-                mlx5_base_mkey(obj->devx_mr.mmkey.key));
-}
-
 static void devx_cleanup_subscription(struct mlx5_ib_dev *dev,
                                      struct devx_event_subscription *sub)
 {
@@ -1362,8 +1339,16 @@ static int devx_obj_cleanup(struct ib_uobject *uobject,
        int ret;
 
        dev = mlx5_udata_to_mdev(&attrs->driver_udata);
-       if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY)
-               devx_cleanup_mkey(obj);
+       if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY) {
+               /*
+                * pagefault_single_data_segment() issues commands against
+                * the mmkey; we must wait for that to stop before freeing the
+                * mkey, as another allocation could get the same mkey #.
+                */
+               xa_erase(&obj->ib_dev->mdev->priv.mkey_table,
+                        mlx5_base_mkey(obj->devx_mr.mmkey.key));
+               synchronize_srcu(&dev->mr_srcu);
+       }
 
        if (obj->flags & DEVX_OBJ_FLAGS_DCT)
                ret = mlx5_core_destroy_dct(obj->ib_dev->mdev, &obj->core_dct);
@@ -1382,12 +1367,6 @@ static int devx_obj_cleanup(struct ib_uobject *uobject,
                devx_cleanup_subscription(dev, sub_entry);
        mutex_unlock(&devx_event_table->event_xa_lock);
 
-       if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY) {
-               call_srcu(&dev->mr_srcu, &obj->devx_mr.rcu,
-                         devx_free_indirect_mkey);
-               return ret;
-       }
-
        kfree(obj);
        return ret;
 }
@@ -1491,26 +1470,21 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)(
                                   &obj_id);
        WARN_ON(obj->dinlen > MLX5_MAX_DESTROY_INBOX_SIZE_DW * sizeof(u32));
 
-       if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY) {
-               err = devx_handle_mkey_indirect(obj, dev, cmd_in, cmd_out);
-               if (err)
-                       goto obj_destroy;
-       }
-
        err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT, cmd_out, cmd_out_len);
        if (err)
-               goto err_copy;
+               goto obj_destroy;
 
        if (opcode == MLX5_CMD_OP_CREATE_GENERAL_OBJECT)
                obj_type = MLX5_GET(general_obj_in_cmd_hdr, cmd_in, obj_type);
-
        obj->obj_id = get_enc_obj_id(opcode | obj_type << 16, obj_id);
 
+       if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY) {
+               err = devx_handle_mkey_indirect(obj, dev, cmd_in, cmd_out);
+               if (err)
+                       goto obj_destroy;
+       }
        return 0;
 
-err_copy:
-       if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY)
-               devx_cleanup_mkey(obj);
 obj_destroy:
        if (obj->flags & DEVX_OBJ_FLAGS_DCT)
                mlx5_core_destroy_dct(obj->ib_dev->mdev, &obj->core_dct);
index 2ceaef3ea3fb92286d737d9264396df025bc5c76..1a98ee2e01c4b991b10b3c83f8395cf79de94524 100644 (file)
@@ -606,7 +606,7 @@ struct mlx5_ib_mr {
        struct mlx5_ib_dev     *dev;
        u32 out[MLX5_ST_SZ_DW(create_mkey_out)];
        struct mlx5_core_sig_ctx    *sig;
-       int                     live;
+       unsigned int            live;
        void                    *descs_alloc;
        int                     access_flags; /* Needed for rereg MR */
 
@@ -639,7 +639,6 @@ struct mlx5_ib_mw {
 struct mlx5_ib_devx_mr {
        struct mlx5_core_mkey   mmkey;
        int                     ndescs;
-       struct rcu_head         rcu;
 };
 
 struct mlx5_ib_umr_context {
index 1eff031ef04842f06ab4d088b04a610b9388aa1b..7019c12005f4c1fb0606473b0ef72b50af7c3af5 100644 (file)
@@ -84,32 +84,6 @@ static bool use_umr_mtt_update(struct mlx5_ib_mr *mr, u64 start, u64 length)
                length + (start & (MLX5_ADAPTER_PAGE_SIZE - 1));
 }
 
-static void update_odp_mr(struct mlx5_ib_mr *mr)
-{
-       if (is_odp_mr(mr)) {
-               /*
-                * This barrier prevents the compiler from moving the
-                * setting of umem->odp_data->private to point to our
-                * MR, before reg_umr finished, to ensure that the MR
-                * initialization have finished before starting to
-                * handle invalidations.
-                */
-               smp_wmb();
-               to_ib_umem_odp(mr->umem)->private = mr;
-               /*
-                * Make sure we will see the new
-                * umem->odp_data->private value in the invalidation
-                * routines, before we can get page faults on the
-                * MR. Page faults can happen once we put the MR in
-                * the tree, below this line. Without the barrier,
-                * there can be a fault handling and an invalidation
-                * before umem->odp_data->private == mr is visible to
-                * the invalidation handler.
-                */
-               smp_wmb();
-       }
-}
-
 static void reg_mr_callback(int status, struct mlx5_async_work *context)
 {
        struct mlx5_ib_mr *mr =
@@ -1346,8 +1320,6 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
        mr->umem = umem;
        set_mr_fields(dev, mr, npages, length, access_flags);
 
-       update_odp_mr(mr);
-
        if (use_umr) {
                int update_xlt_flags = MLX5_IB_UPD_XLT_ENABLE;
 
@@ -1363,10 +1335,12 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
                }
        }
 
-       if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
-               mr->live = 1;
+       if (is_odp_mr(mr)) {
+               to_ib_umem_odp(mr->umem)->private = mr;
                atomic_set(&mr->num_pending_prefetch, 0);
        }
+       if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
+               smp_store_release(&mr->live, 1);
 
        return &mr->ibmr;
 error:
@@ -1441,6 +1415,9 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
        if (!mr->umem)
                return -EINVAL;
 
+       if (is_odp_mr(mr))
+               return -EOPNOTSUPP;
+
        if (flags & IB_MR_REREG_TRANS) {
                addr = virt_addr;
                len = length;
@@ -1486,8 +1463,6 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
                }
 
                mr->allocated_from_cache = 0;
-               if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
-                       mr->live = 1;
        } else {
                /*
                 * Send a UMR WQE
@@ -1516,7 +1491,6 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
 
        set_mr_fields(dev, mr, npages, len, access_flags);
 
-       update_odp_mr(mr);
        return 0;
 
 err:
@@ -1607,15 +1581,16 @@ static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
                /* Prevent new page faults and
                 * prefetch requests from succeeding
                 */
-               mr->live = 0;
+               WRITE_ONCE(mr->live, 0);
+
+               /* Wait for all running page-fault handlers to finish. */
+               synchronize_srcu(&dev->mr_srcu);
 
                /* dequeue pending prefetch requests for the mr */
                if (atomic_read(&mr->num_pending_prefetch))
                        flush_workqueue(system_unbound_wq);
                WARN_ON(atomic_read(&mr->num_pending_prefetch));
 
-               /* Wait for all running page-fault handlers to finish. */
-               synchronize_srcu(&dev->mr_srcu);
                /* Destroy all page mappings */
                if (!umem_odp->is_implicit_odp)
                        mlx5_ib_invalidate_range(umem_odp,
@@ -1987,14 +1962,25 @@ free:
 
 int mlx5_ib_dealloc_mw(struct ib_mw *mw)
 {
+       struct mlx5_ib_dev *dev = to_mdev(mw->device);
        struct mlx5_ib_mw *mmw = to_mmw(mw);
        int err;
 
-       err =  mlx5_core_destroy_mkey((to_mdev(mw->device))->mdev,
-                                     &mmw->mmkey);
-       if (!err)
-               kfree(mmw);
-       return err;
+       if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
+               xa_erase_irq(&dev->mdev->priv.mkey_table,
+                            mlx5_base_mkey(mmw->mmkey.key));
+               /*
+                * pagefault_single_data_segment() may be accessing mmw under
+                * SRCU if the user bound an ODP MR to this MW.
+                */
+               synchronize_srcu(&dev->mr_srcu);
+       }
+
+       err = mlx5_core_destroy_mkey(dev->mdev, &mmw->mmkey);
+       if (err)
+               return err;
+       kfree(mmw);
+       return 0;
 }
 
 int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
index 2e9b43061797455d406f9f875e63ab29d14ce1b9..3f9478d1937668bee0a5fb6cfa8e4e4a43fdc736 100644 (file)
@@ -178,6 +178,29 @@ void mlx5_odp_populate_klm(struct mlx5_klm *pklm, size_t offset,
                return;
        }
 
+       /*
+        * The locking here is pretty subtle. Ideally the implicit children
+        * list would be protected by the umem_mutex, however that is not
+        * possible. Instead this uses a weaker update-then-lock pattern:
+        *
+        *  srcu_read_lock()
+        *    <change children list>
+        *    mutex_lock(umem_mutex)
+        *     mlx5_ib_update_xlt()
+        *    mutex_unlock(umem_mutex)
+        *    destroy lkey
+        *
+        * i.e. any change to the children list must be followed by the locked
+        * update_xlt before destroying.
+        *
+        * The umem_mutex provides the acquire/release semantic needed to make
+        * the children list visible to a racing thread. While SRCU is not
+        * technically required, using it gives consistent use of the SRCU
+        * locking around the children list.
+        */
+       lockdep_assert_held(&to_ib_umem_odp(mr->umem)->umem_mutex);
+       lockdep_assert_held(&mr->dev->mr_srcu);
+
        odp = odp_lookup(offset * MLX5_IMR_MTT_SIZE,
                         nentries * MLX5_IMR_MTT_SIZE, mr);
 
@@ -202,15 +225,22 @@ static void mr_leaf_free_action(struct work_struct *work)
        struct ib_umem_odp *odp = container_of(work, struct ib_umem_odp, work);
        int idx = ib_umem_start(odp) >> MLX5_IMR_MTT_SHIFT;
        struct mlx5_ib_mr *mr = odp->private, *imr = mr->parent;
+       struct ib_umem_odp *odp_imr = to_ib_umem_odp(imr->umem);
+       int srcu_key;
 
        mr->parent = NULL;
        synchronize_srcu(&mr->dev->mr_srcu);
 
-       ib_umem_odp_release(odp);
-       if (imr->live)
+       if (smp_load_acquire(&imr->live)) {
+               srcu_key = srcu_read_lock(&mr->dev->mr_srcu);
+               mutex_lock(&odp_imr->umem_mutex);
                mlx5_ib_update_xlt(imr, idx, 1, 0,
                                   MLX5_IB_UPD_XLT_INDIRECT |
                                   MLX5_IB_UPD_XLT_ATOMIC);
+               mutex_unlock(&odp_imr->umem_mutex);
+               srcu_read_unlock(&mr->dev->mr_srcu, srcu_key);
+       }
+       ib_umem_odp_release(odp);
        mlx5_mr_cache_free(mr->dev, mr);
 
        if (atomic_dec_and_test(&imr->num_leaf_free))
@@ -278,7 +308,6 @@ void mlx5_ib_invalidate_range(struct ib_umem_odp *umem_odp, unsigned long start,
                                   idx - blk_start_idx + 1, 0,
                                   MLX5_IB_UPD_XLT_ZAP |
                                   MLX5_IB_UPD_XLT_ATOMIC);
-       mutex_unlock(&umem_odp->umem_mutex);
        /*
         * We are now sure that the device will not access the
         * memory. We can safely unmap it, and mark it as dirty if
@@ -289,10 +318,12 @@ void mlx5_ib_invalidate_range(struct ib_umem_odp *umem_odp, unsigned long start,
 
        if (unlikely(!umem_odp->npages && mr->parent &&
                     !umem_odp->dying)) {
-               WRITE_ONCE(umem_odp->dying, 1);
+               WRITE_ONCE(mr->live, 0);
+               umem_odp->dying = 1;
                atomic_inc(&mr->parent->num_leaf_free);
                schedule_work(&umem_odp->work);
        }
+       mutex_unlock(&umem_odp->umem_mutex);
 }
 
 void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
@@ -429,8 +460,6 @@ static struct mlx5_ib_mr *implicit_mr_alloc(struct ib_pd *pd,
        mr->ibmr.lkey = mr->mmkey.key;
        mr->ibmr.rkey = mr->mmkey.key;
 
-       mr->live = 1;
-
        mlx5_ib_dbg(dev, "key %x dev %p mr %p\n",
                    mr->mmkey.key, dev->mdev, mr);
 
@@ -484,6 +513,8 @@ next_mr:
                mtt->parent = mr;
                INIT_WORK(&odp->work, mr_leaf_free_action);
 
+               smp_store_release(&mtt->live, 1);
+
                if (!nentries)
                        start_idx = addr >> MLX5_IMR_MTT_SHIFT;
                nentries++;
@@ -536,6 +567,7 @@ struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
        init_waitqueue_head(&imr->q_leaf_free);
        atomic_set(&imr->num_leaf_free, 0);
        atomic_set(&imr->num_pending_prefetch, 0);
+       smp_store_release(&imr->live, 1);
 
        return imr;
 }
@@ -555,15 +587,19 @@ void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *imr)
                if (mr->parent != imr)
                        continue;
 
+               mutex_lock(&umem_odp->umem_mutex);
                ib_umem_odp_unmap_dma_pages(umem_odp, ib_umem_start(umem_odp),
                                            ib_umem_end(umem_odp));
 
-               if (umem_odp->dying)
+               if (umem_odp->dying) {
+                       mutex_unlock(&umem_odp->umem_mutex);
                        continue;
+               }
 
-               WRITE_ONCE(umem_odp->dying, 1);
+               umem_odp->dying = 1;
                atomic_inc(&imr->num_leaf_free);
                schedule_work(&umem_odp->work);
+               mutex_unlock(&umem_odp->umem_mutex);
        }
        up_read(&per_mm->umem_rwsem);
 
@@ -773,7 +809,7 @@ next_mr:
        switch (mmkey->type) {
        case MLX5_MKEY_MR:
                mr = container_of(mmkey, struct mlx5_ib_mr, mmkey);
-               if (!mr->live || !mr->ibmr.pd) {
+               if (!smp_load_acquire(&mr->live) || !mr->ibmr.pd) {
                        mlx5_ib_dbg(dev, "got dead MR\n");
                        ret = -EFAULT;
                        goto srcu_unlock;
@@ -1641,12 +1677,12 @@ static bool num_pending_prefetch_inc(struct ib_pd *pd,
 
                mr = container_of(mmkey, struct mlx5_ib_mr, mmkey);
 
-               if (mr->ibmr.pd != pd) {
+               if (!smp_load_acquire(&mr->live)) {
                        ret = false;
                        break;
                }
 
-               if (!mr->live) {
+               if (mr->ibmr.pd != pd) {
                        ret = false;
                        break;
                }
index 8937d72ddcf6bdb44f48b37acbbec178fa2650e0..5fd071c05944d900b95654925dfff09bc47e1e69 100644 (file)
@@ -3249,10 +3249,12 @@ static int modify_raw_packet_qp_sq(
        }
 
        /* Only remove the old rate after new rate was set */
-       if ((old_rl.rate &&
-            !mlx5_rl_are_equal(&old_rl, &new_rl)) ||
-           (new_state != MLX5_SQC_STATE_RDY))
+       if ((old_rl.rate && !mlx5_rl_are_equal(&old_rl, &new_rl)) ||
+           (new_state != MLX5_SQC_STATE_RDY)) {
                mlx5_rl_remove_rate(dev, &old_rl);
+               if (new_state != MLX5_SQC_STATE_RDY)
+                       memset(&new_rl, 0, sizeof(new_rl));
+       }
 
        ibqp->rl = new_rl;
        sq->state = new_state;
index 5136b835e1ba0521050caf388d018125a0ad2347..dc71b6e16a07f300306a2d39b255d546ac3a38ea 100644 (file)
@@ -76,7 +76,7 @@ static void qedr_get_dev_fw_str(struct ib_device *ibdev, char *str)
        struct qedr_dev *qedr = get_qedr_dev(ibdev);
        u32 fw_ver = (u32)qedr->attr.fw_ver;
 
-       snprintf(str, IB_FW_VERSION_NAME_MAX, "%d. %d. %d. %d",
+       snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d.%d",
                 (fw_ver >> 24) & 0xFF, (fw_ver >> 16) & 0xFF,
                 (fw_ver >> 8) & 0xFF, fw_ver & 0xFF);
 }
index 6cac0c88cf39d55f16df1af5b14c12513e2d8188..36cdfbdbd32568ad5ae48c795d3736a24e982d55 100644 (file)
@@ -230,8 +230,6 @@ static void pvrdma_free_srq(struct pvrdma_dev *dev, struct pvrdma_srq *srq)
 
        pvrdma_page_dir_cleanup(dev, &srq->pdir);
 
-       kfree(srq);
-
        atomic_dec(&dev->num_srqs);
 }
 
index 430314c8abd948c200d0eb1c203d5325c8230be1..b4317480cee7486d2f461d4b870cd801bc636d93 100644 (file)
@@ -182,12 +182,19 @@ void siw_qp_llp_close(struct siw_qp *qp)
  */
 void siw_qp_llp_write_space(struct sock *sk)
 {
-       struct siw_cep *cep = sk_to_cep(sk);
+       struct siw_cep *cep;
 
-       cep->sk_write_space(sk);
+       read_lock(&sk->sk_callback_lock);
+
+       cep = sk_to_cep(sk);
+       if (cep) {
+               cep->sk_write_space(sk);
 
-       if (!test_bit(SOCK_NOSPACE, &sk->sk_socket->flags))
-               (void)siw_sq_start(cep->qp);
+               if (!test_bit(SOCK_NOSPACE, &sk->sk_socket->flags))
+                       (void)siw_sq_start(cep->qp);
+       }
+
+       read_unlock(&sk->sk_callback_lock);
 }
 
 static int siw_qp_readq_init(struct siw_qp *qp, int irq_size, int orq_size)
@@ -1305,6 +1312,7 @@ int siw_qp_add(struct siw_device *sdev, struct siw_qp *qp)
 void siw_free_qp(struct kref *ref)
 {
        struct siw_qp *found, *qp = container_of(ref, struct siw_qp, ref);
+       struct siw_base_qp *siw_base_qp = to_siw_base_qp(qp->ib_qp);
        struct siw_device *sdev = qp->sdev;
        unsigned long flags;
 
@@ -1327,4 +1335,5 @@ void siw_free_qp(struct kref *ref)
        atomic_dec(&sdev->num_qp);
        siw_dbg_qp(qp, "free QP\n");
        kfree_rcu(qp, rcu);
+       kfree(siw_base_qp);
 }
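Editor's note: the siw_qp_llp_write_space() fix above takes sk->sk_callback_lock before dereferencing the endpoint attached to the socket, because connection teardown can clear that pointer concurrently. A userspace sketch of the same idea follows, with a pthread rwlock in place of sk_callback_lock and invented write_space()/close_ep() names.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct sock {
	pthread_rwlock_t callback_lock;
	void *user_data;	/* set by the endpoint, cleared on close */
};

static void write_space(struct sock *sk)
{
	pthread_rwlock_rdlock(&sk->callback_lock);
	if (sk->user_data)	/* may already have been torn down */
		printf("kick tx for %p\n", sk->user_data);
	pthread_rwlock_unlock(&sk->callback_lock);
}

static void close_ep(struct sock *sk)
{
	void *ctx;

	pthread_rwlock_wrlock(&sk->callback_lock);
	ctx = sk->user_data;
	sk->user_data = NULL;	/* callbacks now observe NULL */
	pthread_rwlock_unlock(&sk->callback_lock);
	free(ctx);		/* safe: no callback can still be using it */
}

int main(void)
{
	struct sock sk = { .user_data = malloc(8) };

	pthread_rwlock_init(&sk.callback_lock, NULL);
	write_space(&sk);
	close_ep(&sk);
	write_space(&sk);	/* a no-op instead of a use-after-free */
	return 0;
}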
index 869e02b69a012389d6d3533241012b1c1309e401..b18a677832e105ded578bd74d29694c45e55d031 100644 (file)
@@ -604,7 +604,6 @@ out:
 int siw_destroy_qp(struct ib_qp *base_qp, struct ib_udata *udata)
 {
        struct siw_qp *qp = to_siw_qp(base_qp);
-       struct siw_base_qp *siw_base_qp = to_siw_base_qp(base_qp);
        struct siw_ucontext *uctx =
                rdma_udata_to_drv_context(udata, struct siw_ucontext,
                                          base_ucontext);
@@ -641,7 +640,6 @@ int siw_destroy_qp(struct ib_qp *base_qp, struct ib_udata *udata)
        qp->scq = qp->rcq = NULL;
 
        siw_qp_put(qp);
-       kfree(siw_base_qp);
 
        return 0;
 }
index dace8577fa43dcf1f84843bfeeda123e96b952fb..79851923ee576ab59d28d2f764b30d6b6a4b2f67 100644 (file)
@@ -232,10 +232,7 @@ static int da9063_onkey_probe(struct platform_device *pdev)
        onkey->input->phys = onkey->phys;
        onkey->input->dev.parent = &pdev->dev;
 
-       if (onkey->key_power)
-               input_set_capability(onkey->input, EV_KEY, KEY_POWER);
-
-       input_set_capability(onkey->input, EV_KEY, KEY_SLEEP);
+       input_set_capability(onkey->input, EV_KEY, KEY_POWER);
 
        INIT_DELAYED_WORK(&onkey->work, da9063_poll_on);
 
index 97e3639e99d0cdbeee8c5482e1b1d353e67d9441..08520b3a18b88d3e463e9ea7dd84c07f3567ed8a 100644 (file)
@@ -92,11 +92,18 @@ soc_button_device_create(struct platform_device *pdev,
                        continue;
 
                gpio = soc_button_lookup_gpio(&pdev->dev, info->acpi_index);
-               if (gpio < 0 && gpio != -ENOENT) {
-                       error = gpio;
-                       goto err_free_mem;
-               } else if (!gpio_is_valid(gpio)) {
-                       /* Skip GPIO if not present */
+               if (!gpio_is_valid(gpio)) {
+                       /*
+                        * Skip GPIO if not present. Note we deliberately
+                        * ignore -EPROBE_DEFER errors here. On some devices
+                        * Intel uses so-called virtual GPIOs, which are not
+                        * GPIOs at all but a way for AML code to check some
+                        * random status bits without needing a custom opregion.
+                        * In some cases the resources table we parse points to
+                        * such a virtual GPIO; since these are not real GPIOs
+                        * we have no driver for them, they will never show up,
+                        * and we therefore ignore -EPROBE_DEFER.
+                        */
                        continue;
                }
 
index 04fe43440a3c8740d58ee1cb944fc3b7099aa981..2d8434b7b62381a85abdb28daba2391fdd117e40 100644 (file)
@@ -1827,31 +1827,6 @@ static int elantech_create_smbus(struct psmouse *psmouse,
                                  leave_breadcrumbs);
 }
 
-static bool elantech_use_host_notify(struct psmouse *psmouse,
-                                    struct elantech_device_info *info)
-{
-       if (ETP_NEW_IC_SMBUS_HOST_NOTIFY(info->fw_version))
-               return true;
-
-       switch (info->bus) {
-       case ETP_BUS_PS2_ONLY:
-               /* expected case */
-               break;
-       case ETP_BUS_SMB_HST_NTFY_ONLY:
-       case ETP_BUS_PS2_SMB_HST_NTFY:
-               /* SMbus implementation is stable since 2018 */
-               if (dmi_get_bios_year() >= 2018)
-                       return true;
-               /* fall through */
-       default:
-               psmouse_dbg(psmouse,
-                           "Ignoring SMBus bus provider %d\n", info->bus);
-               break;
-       }
-
-       return false;
-}
-
 /**
  * elantech_setup_smbus - called once the PS/2 devices are enumerated
  * and decides to instantiate a SMBus InterTouch device.
@@ -1871,7 +1846,7 @@ static int elantech_setup_smbus(struct psmouse *psmouse,
                 * i2c_blacklist_pnp_ids.
                 * Old ICs are up to the user to decide.
                 */
-               if (!elantech_use_host_notify(psmouse, info) ||
+               if (!ETP_NEW_IC_SMBUS_HOST_NOTIFY(info->fw_version) ||
                    psmouse_matches_pnp_id(psmouse, i2c_blacklist_pnp_ids))
                        return -ENXIO;
        }
@@ -1891,6 +1866,34 @@ static int elantech_setup_smbus(struct psmouse *psmouse,
        return 0;
 }
 
+static bool elantech_use_host_notify(struct psmouse *psmouse,
+                                    struct elantech_device_info *info)
+{
+       if (ETP_NEW_IC_SMBUS_HOST_NOTIFY(info->fw_version))
+               return true;
+
+       switch (info->bus) {
+       case ETP_BUS_PS2_ONLY:
+               /* expected case */
+               break;
+       case ETP_BUS_SMB_ALERT_ONLY:
+               /* fall-through  */
+       case ETP_BUS_PS2_SMB_ALERT:
+               psmouse_dbg(psmouse, "Ignoring SMBus provider through alert protocol.\n");
+               break;
+       case ETP_BUS_SMB_HST_NTFY_ONLY:
+               /* fall-through  */
+       case ETP_BUS_PS2_SMB_HST_NTFY:
+               return true;
+       default:
+               psmouse_dbg(psmouse,
+                           "Ignoring SMBus bus provider %d.\n",
+                           info->bus);
+       }
+
+       return false;
+}
+
 int elantech_init_smbus(struct psmouse *psmouse)
 {
        struct elantech_device_info info;
index 772493b1f665da7bad6c857902a2bfbb00d12c5e..190b9974526bb9e78f8b3200283b680690db2079 100644 (file)
@@ -146,7 +146,7 @@ static int rmi_process_interrupt_requests(struct rmi_device *rmi_dev)
        }
 
        mutex_lock(&data->irq_mutex);
-       bitmap_and(data->irq_status, data->irq_status, data->current_irq_mask,
+       bitmap_and(data->irq_status, data->irq_status, data->fn_irq_bits,
               data->irq_count);
        /*
         * At this point, irq_status has all bits that are set in the
@@ -385,6 +385,8 @@ static int rmi_driver_set_irq_bits(struct rmi_device *rmi_dev,
        bitmap_copy(data->current_irq_mask, data->new_irq_mask,
                    data->num_of_irq_regs);
 
+       bitmap_or(data->fn_irq_bits, data->fn_irq_bits, mask, data->irq_count);
+
 error_unlock:
        mutex_unlock(&data->irq_mutex);
        return error;
@@ -398,6 +400,8 @@ static int rmi_driver_clear_irq_bits(struct rmi_device *rmi_dev,
        struct device *dev = &rmi_dev->dev;
 
        mutex_lock(&data->irq_mutex);
+       bitmap_andnot(data->fn_irq_bits,
+                     data->fn_irq_bits, mask, data->irq_count);
        bitmap_andnot(data->new_irq_mask,
                  data->current_irq_mask, mask, data->irq_count);
 
index 5178ea8b5f3099eee52a8483e72fa0b2dcce679b..fb43aa708660632613013c936b937bfe9991f46e 100644 (file)
@@ -53,6 +53,7 @@ struct goodix_ts_data {
        const char *cfg_name;
        struct completion firmware_loading_complete;
        unsigned long irq_flags;
+       unsigned int contact_size;
 };
 
 #define GOODIX_GPIO_INT_NAME           "irq"
@@ -62,6 +63,7 @@ struct goodix_ts_data {
 #define GOODIX_MAX_WIDTH               4096
 #define GOODIX_INT_TRIGGER             1
 #define GOODIX_CONTACT_SIZE            8
+#define GOODIX_MAX_CONTACT_SIZE                9
 #define GOODIX_MAX_CONTACTS            10
 
 #define GOODIX_CONFIG_MAX_LENGTH       240
@@ -144,6 +146,19 @@ static const struct dmi_system_id rotated_screen[] = {
        {}
 };
 
+static const struct dmi_system_id nine_bytes_report[] = {
+#if defined(CONFIG_DMI) && defined(CONFIG_X86)
+       {
+               .ident = "Lenovo YogaBook",
+               /* YB1-X91L/F and YB1-X90L/F */
+               .matches = {
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Lenovo YB1-X9")
+               }
+       },
+#endif
+       {}
+};
+
 /**
  * goodix_i2c_read - read data from a register of the i2c slave device.
  *
@@ -249,7 +264,7 @@ static int goodix_ts_read_input_report(struct goodix_ts_data *ts, u8 *data)
        max_timeout = jiffies + msecs_to_jiffies(GOODIX_BUFFER_STATUS_TIMEOUT);
        do {
                error = goodix_i2c_read(ts->client, GOODIX_READ_COOR_ADDR,
-                                       data, GOODIX_CONTACT_SIZE + 1);
+                                       data, ts->contact_size + 1);
                if (error) {
                        dev_err(&ts->client->dev, "I2C transfer error: %d\n",
                                        error);
@@ -262,12 +277,12 @@ static int goodix_ts_read_input_report(struct goodix_ts_data *ts, u8 *data)
                                return -EPROTO;
 
                        if (touch_num > 1) {
-                               data += 1 + GOODIX_CONTACT_SIZE;
+                               data += 1 + ts->contact_size;
                                error = goodix_i2c_read(ts->client,
                                                GOODIX_READ_COOR_ADDR +
-                                                       1 + GOODIX_CONTACT_SIZE,
+                                                       1 + ts->contact_size,
                                                data,
-                                               GOODIX_CONTACT_SIZE *
+                                               ts->contact_size *
                                                        (touch_num - 1));
                                if (error)
                                        return error;
@@ -286,7 +301,7 @@ static int goodix_ts_read_input_report(struct goodix_ts_data *ts, u8 *data)
        return 0;
 }
 
-static void goodix_ts_report_touch(struct goodix_ts_data *ts, u8 *coor_data)
+static void goodix_ts_report_touch_8b(struct goodix_ts_data *ts, u8 *coor_data)
 {
        int id = coor_data[0] & 0x0F;
        int input_x = get_unaligned_le16(&coor_data[1]);
@@ -301,6 +316,21 @@ static void goodix_ts_report_touch(struct goodix_ts_data *ts, u8 *coor_data)
        input_report_abs(ts->input_dev, ABS_MT_WIDTH_MAJOR, input_w);
 }
 
+static void goodix_ts_report_touch_9b(struct goodix_ts_data *ts, u8 *coor_data)
+{
+       int id = coor_data[1] & 0x0F;
+       int input_x = get_unaligned_le16(&coor_data[3]);
+       int input_y = get_unaligned_le16(&coor_data[5]);
+       int input_w = get_unaligned_le16(&coor_data[7]);
+
+       input_mt_slot(ts->input_dev, id);
+       input_mt_report_slot_state(ts->input_dev, MT_TOOL_FINGER, true);
+       touchscreen_report_pos(ts->input_dev, &ts->prop,
+                              input_x, input_y, true);
+       input_report_abs(ts->input_dev, ABS_MT_TOUCH_MAJOR, input_w);
+       input_report_abs(ts->input_dev, ABS_MT_WIDTH_MAJOR, input_w);
+}
+
 /**
  * goodix_process_events - Process incoming events
  *
@@ -311,7 +341,7 @@ static void goodix_ts_report_touch(struct goodix_ts_data *ts, u8 *coor_data)
  */
 static void goodix_process_events(struct goodix_ts_data *ts)
 {
-       u8  point_data[1 + GOODIX_CONTACT_SIZE * GOODIX_MAX_CONTACTS];
+       u8  point_data[1 + GOODIX_MAX_CONTACT_SIZE * GOODIX_MAX_CONTACTS];
        int touch_num;
        int i;
 
@@ -326,8 +356,12 @@ static void goodix_process_events(struct goodix_ts_data *ts)
        input_report_key(ts->input_dev, KEY_LEFTMETA, point_data[0] & BIT(4));
 
        for (i = 0; i < touch_num; i++)
-               goodix_ts_report_touch(ts,
-                               &point_data[1 + GOODIX_CONTACT_SIZE * i]);
+               if (ts->contact_size == 9)
+                       goodix_ts_report_touch_9b(ts,
+                               &point_data[1 + ts->contact_size * i]);
+               else
+                       goodix_ts_report_touch_8b(ts,
+                               &point_data[1 + ts->contact_size * i]);
 
        input_mt_sync_frame(ts->input_dev);
        input_sync(ts->input_dev);
@@ -730,6 +764,13 @@ static int goodix_configure_dev(struct goodix_ts_data *ts)
                        "Applying '180 degrees rotated screen' quirk\n");
        }
 
+       if (dmi_check_system(nine_bytes_report)) {
+               ts->contact_size = 9;
+
+               dev_dbg(&ts->client->dev,
+                       "Non-standard 9-byte report format quirk\n");
+       }
+
        error = input_mt_init_slots(ts->input_dev, ts->max_touch_num,
                                    INPUT_MT_DIRECT | INPUT_MT_DROP_UNUSED);
        if (error) {
@@ -810,6 +851,7 @@ static int goodix_ts_probe(struct i2c_client *client,
        ts->client = client;
        i2c_set_clientdata(client, ts);
        init_completion(&ts->firmware_loading_complete);
+       ts->contact_size = GOODIX_CONTACT_SIZE;
 
        error = goodix_get_gpio_config(ts);
        if (error)
index 34923399ece4e98534022f90d8ad4180ef4536b6..1139714e72e2672f26a52be85280e7f8fea22e11 100644 (file)
@@ -81,8 +81,10 @@ static int st1232_ts_read_data(struct st1232_ts_data *ts)
        for (i = 0, y = 0; i < ts->chip_info->max_fingers; i++, y += 3) {
                finger[i].is_valid = buf[i + y] >> 7;
                if (finger[i].is_valid) {
-                       finger[i].x = ((buf[i + y] & 0x0070) << 4) | buf[i + 1];
-                       finger[i].y = ((buf[i + y] & 0x0007) << 8) | buf[i + 2];
+                       finger[i].x = ((buf[i + y] & 0x0070) << 4) |
+                                       buf[i + y + 1];
+                       finger[i].y = ((buf[i + y] & 0x0007) << 8) |
+                                       buf[i + y + 2];
 
                        /* st1232 includes a z-axis / touch strength */
                        if (ts->chip_info->have_z)
index 2369b8af81f3b7e4d829a75ef979fb1858f0682f..dd555078258c437c375902538ecc2f9040efd7cc 100644 (file)
@@ -583,7 +583,8 @@ static void iommu_print_event(struct amd_iommu *iommu, void *__evt)
 retry:
        type    = (event[1] >> EVENT_TYPE_SHIFT)  & EVENT_TYPE_MASK;
        devid   = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
-       pasid   = PPR_PASID(*(u64 *)&event[0]);
+       pasid   = (event[0] & EVENT_DOMID_MASK_HI) |
+                 (event[1] & EVENT_DOMID_MASK_LO);
        flags   = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
        address = (u64)(((u64)event[3]) << 32) | event[2];
 
@@ -616,7 +617,7 @@ retry:
                        address, flags);
                break;
        case EVENT_TYPE_PAGE_TAB_ERR:
-               dev_err(dev, "Event logged [PAGE_TAB_HARDWARE_ERROR device=%02x:%02x.%x domain=0x%04x address=0x%llx flags=0x%04x]\n",
+               dev_err(dev, "Event logged [PAGE_TAB_HARDWARE_ERROR device=%02x:%02x.%x pasid=0x%04x address=0x%llx flags=0x%04x]\n",
                        PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid),
                        pasid, address, flags);
                break;
@@ -1463,6 +1464,7 @@ static void free_pagetable(struct protection_domain *domain)
  * to 64 bits.
  */
 static bool increase_address_space(struct protection_domain *domain,
+                                  unsigned long address,
                                   gfp_t gfp)
 {
        unsigned long flags;
@@ -1471,8 +1473,8 @@ static bool increase_address_space(struct protection_domain *domain,
 
        spin_lock_irqsave(&domain->lock, flags);
 
-       if (WARN_ON_ONCE(domain->mode == PAGE_MODE_6_LEVEL))
-               /* address space already 64 bit large */
+       if (address <= PM_LEVEL_SIZE(domain->mode) ||
+           WARN_ON_ONCE(domain->mode == PAGE_MODE_6_LEVEL))
                goto out;
 
        pte = (void *)get_zeroed_page(gfp);
@@ -1505,7 +1507,7 @@ static u64 *alloc_pte(struct protection_domain *domain,
        BUG_ON(!is_power_of_2(page_size));
 
        while (address > PM_LEVEL_SIZE(domain->mode))
-               *updated = increase_address_space(domain, gfp) || *updated;
+               *updated = increase_address_space(domain, address, gfp) || *updated;
 
        level   = domain->mode - 1;
        pte     = &domain->pt_root[PM_LEVEL_INDEX(level, address)];
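
The iommu_print_event() change above reassembles the domain ID from two 32-bit event-log words instead of reading a PASID field. A standalone sketch of that bit composition, reusing the mask values this series introduces (the example words are made up):

    #include <stdint.h>
    #include <stdio.h>

    #define EVENT_DOMID_MASK_LO 0xffffu
    #define EVENT_DOMID_MASK_HI 0xf0000u

    static uint32_t event_domid(uint32_t ev0, uint32_t ev1)
    {
            /* upper nibble from word 0, low 16 bits from word 1,
             * mirroring the hunk above */
            return (ev0 & EVENT_DOMID_MASK_HI) | (ev1 & EVENT_DOMID_MASK_LO);
    }

    int main(void)
    {
            printf("domid=0x%05x\n", event_domid(0x00051234, 0x0000abcd));
            return 0; /* prints: domid=0x5abcd */
    }
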
index c235f79b7a200c90f8b8d4325a61ce4373de8d86..5120ce4fdce326b6b59185d66e7ba8cb146bf4d5 100644 (file)
@@ -73,6 +73,19 @@ static const struct dmi_system_id ivrs_quirks[] __initconst = {
                },
                .driver_data = (void *)&ivrs_ioapic_quirks[DELL_LATITUDE_5495],
        },
+       {
+               /*
+                * Acer Aspire A315-41 requires the very same workaround as
+                * Dell Latitude 5495
+                */
+               .callback = ivrs_ioapic_quirk_cb,
+               .ident = "Acer Aspire A315-41",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Aspire A315-41"),
+               },
+               .driver_data = (void *)&ivrs_ioapic_quirks[DELL_LATITUDE_5495],
+       },
        {
                .callback = ivrs_ioapic_quirk_cb,
                .ident = "Lenovo ideapad 330S-15ARR",
index c9c1612d52e00090a2fb1a4e374e9468ba25607e..17bd5a349119f89d7ba2f140fc89d99ed73e85d4 100644 (file)
 #define EVENT_TYPE_INV_PPR_REQ 0x9
 #define EVENT_DEVID_MASK       0xffff
 #define EVENT_DEVID_SHIFT      0
-#define EVENT_DOMID_MASK       0xffff
-#define EVENT_DOMID_SHIFT      0
+#define EVENT_DOMID_MASK_LO    0xffff
+#define EVENT_DOMID_MASK_HI    0xf0000
 #define EVENT_FLAGS_MASK       0xfff
 #define EVENT_FLAGS_SHIFT      0x10
 
index b18aac4c105eef088d03cb624011dcc25284f9e5..7c503a6bc5854fbf04cfdd68591fd5c2edb23b9d 100644 (file)
@@ -812,6 +812,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
        return 0;
 
 out_clear_smmu:
+       __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
        smmu_domain->smmu = NULL;
 out_unlock:
        mutex_unlock(&smmu_domain->init_mutex);
index 3f974919d3bdb4c7ee78adc091f585c4cc7a6da7..6db6d969e31c6adcc5202fa0ea97eabf347b7b63 100644 (file)
@@ -2794,7 +2794,7 @@ static int identity_mapping(struct device *dev)
        struct device_domain_info *info;
 
        info = dev->archdata.iommu;
-       if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
+       if (info && info != DUMMY_DEVICE_DOMAIN_INFO && info != DEFER_DEVICE_DOMAIN_INFO)
                return (info->domain == si_domain);
 
        return 0;
@@ -3471,7 +3471,7 @@ static bool iommu_need_mapping(struct device *dev)
                if (dev->coherent_dma_mask && dev->coherent_dma_mask < dma_mask)
                        dma_mask = dev->coherent_dma_mask;
 
-               if (dma_mask >= dma_get_required_mask(dev))
+               if (dma_mask >= dma_direct_get_required_mask(dev))
                        return false;
 
                /*
@@ -3775,6 +3775,13 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nele
        return nelems;
 }
 
+static u64 intel_get_required_mask(struct device *dev)
+{
+       if (!iommu_need_mapping(dev))
+               return dma_direct_get_required_mask(dev);
+       return DMA_BIT_MASK(32);
+}
+
 static const struct dma_map_ops intel_dma_ops = {
        .alloc = intel_alloc_coherent,
        .free = intel_free_coherent,
@@ -3787,6 +3794,7 @@ static const struct dma_map_ops intel_dma_ops = {
        .dma_supported = dma_direct_supported,
        .mmap = dma_common_mmap,
        .get_sgtable = dma_common_get_sgtable,
+       .get_required_mask = intel_get_required_mask,
 };
 
 static void
index 4c91359057c53d906ee5496396e28595c2153905..ca51036aa53c7140470fbc0fb3abbc975e7a7a45 100644 (file)
 #define ARM_MALI_LPAE_TTBR_READ_INNER  BIT(2)
 #define ARM_MALI_LPAE_TTBR_SHARE_OUTER BIT(4)
 
+#define ARM_MALI_LPAE_MEMATTR_IMP_DEF  0x88ULL
+#define ARM_MALI_LPAE_MEMATTR_WRITE_ALLOC 0x8DULL
+
 /* IOPTE accessors */
 #define iopte_deref(pte,d) __va(iopte_to_paddr(pte, d))
 
@@ -1015,27 +1018,56 @@ arm_32_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
 static struct io_pgtable *
 arm_mali_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
 {
-       struct io_pgtable *iop;
+       struct arm_lpae_io_pgtable *data;
 
-       if (cfg->ias != 48 || cfg->oas > 40)
+       /* No quirks for Mali (hopefully) */
+       if (cfg->quirks)
+               return NULL;
+
+       if (cfg->ias > 48 || cfg->oas > 40)
                return NULL;
 
        cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
-       iop = arm_64_lpae_alloc_pgtable_s1(cfg, cookie);
-       if (iop) {
-               u64 mair, ttbr;
 
-               /* Copy values as union fields overlap */
-               mair = cfg->arm_lpae_s1_cfg.mair[0];
-               ttbr = cfg->arm_lpae_s1_cfg.ttbr[0];
+       data = arm_lpae_alloc_pgtable(cfg);
+       if (!data)
+               return NULL;
 
-               cfg->arm_mali_lpae_cfg.memattr = mair;
-               cfg->arm_mali_lpae_cfg.transtab = ttbr |
-                       ARM_MALI_LPAE_TTBR_READ_INNER |
-                       ARM_MALI_LPAE_TTBR_ADRMODE_TABLE;
+       /* Mali seems to need a full 4-level table regardless of IAS */
+       if (data->levels < ARM_LPAE_MAX_LEVELS) {
+               data->levels = ARM_LPAE_MAX_LEVELS;
+               data->pgd_size = sizeof(arm_lpae_iopte);
        }
+       /*
+        * MEMATTR: Mali has no actual notion of a non-cacheable type, so the
+        * best we can do is mimic the out-of-tree driver and hope that the
+        * "implementation-defined caching policy" is good enough. Similarly,
+        * we'll use it for the sake of a valid attribute for our 'device'
+        * index, although callers should never request that in practice.
+        */
+       cfg->arm_mali_lpae_cfg.memattr =
+               (ARM_MALI_LPAE_MEMATTR_IMP_DEF
+                << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
+               (ARM_MALI_LPAE_MEMATTR_WRITE_ALLOC
+                << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
+               (ARM_MALI_LPAE_MEMATTR_IMP_DEF
+                << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV));
 
-       return iop;
+       data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg);
+       if (!data->pgd)
+               goto out_free_data;
+
+       /* Ensure the empty pgd is visible before TRANSTAB can be written */
+       wmb();
+
+       cfg->arm_mali_lpae_cfg.transtab = virt_to_phys(data->pgd) |
+                                         ARM_MALI_LPAE_TTBR_READ_INNER |
+                                         ARM_MALI_LPAE_TTBR_ADRMODE_TABLE;
+       return &data->iop;
+
+out_free_data:
+       kfree(data);
+       return NULL;
 }
 
 struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns = {
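
The memattr value assembled above packs one 8-bit attribute encoding per attribute index, MAIR-style. A standalone sketch of that packing; the index numbers and the one-byte-per-index shift are assumptions made for the example, not taken from the hunk:

    #include <stdint.h>
    #include <stdio.h>

    #define MEMATTR_IMP_DEF     0x88ULL
    #define MEMATTR_WRITE_ALLOC 0x8DULL

    #define ATTR_IDX_NC    0
    #define ATTR_IDX_CACHE 1
    #define ATTR_IDX_DEV   2
    #define ATTR_SHIFT(i)  ((i) << 3)   /* assume one byte per attribute index */

    int main(void)
    {
            uint64_t memattr =
                    (MEMATTR_IMP_DEF     << ATTR_SHIFT(ATTR_IDX_NC)) |
                    (MEMATTR_WRITE_ALLOC << ATTR_SHIFT(ATTR_IDX_CACHE)) |
                    (MEMATTR_IMP_DEF     << ATTR_SHIFT(ATTR_IDX_DEV));

            printf("memattr = 0x%llx\n", (unsigned long long)memattr);
            return 0; /* prints: memattr = 0x888d88 */
    }
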
index 9da8309f71708f213f91dbe99374681e0da3f652..2639fc7181171c4ab1db52bb0765c8f0a8b24bd6 100644 (file)
@@ -1086,8 +1086,6 @@ static int ipmmu_probe(struct platform_device *pdev)
 
        mmu->num_ctx = min(IPMMU_CTX_MAX, mmu->features->number_of_contexts);
 
-       irq = platform_get_irq(pdev, 0);
-
        /*
         * Determine if this IPMMU instance is a root device by checking for
         * the lack of has_cache_leaf_nodes flag or renesas,ipmmu-main property.
@@ -1106,10 +1104,9 @@ static int ipmmu_probe(struct platform_device *pdev)
 
        /* Root devices have mandatory IRQs */
        if (ipmmu_is_root(mmu)) {
-               if (irq < 0) {
-                       dev_err(&pdev->dev, "no IRQ found\n");
+               irq = platform_get_irq(pdev, 0);
+               if (irq < 0)
                        return irq;
-               }
 
                ret = devm_request_irq(&pdev->dev, irq, ipmmu_irq, 0,
                                       dev_name(&pdev->dev), mmu);
index 26290f310f90bd8c6d05da9b255ec02d144cd56f..4dcbf68dfda436828ded5d86442d19e48fa62ff1 100644 (file)
@@ -100,6 +100,7 @@ struct rk_iommu {
        struct device *dev;
        void __iomem **bases;
        int num_mmu;
+       int num_irq;
        struct clk_bulk_data *clocks;
        int num_clocks;
        bool reset_disabled;
@@ -1136,7 +1137,7 @@ static int rk_iommu_probe(struct platform_device *pdev)
        struct rk_iommu *iommu;
        struct resource *res;
        int num_res = pdev->num_resources;
-       int err, i, irq;
+       int err, i;
 
        iommu = devm_kzalloc(dev, sizeof(*iommu), GFP_KERNEL);
        if (!iommu)
@@ -1163,6 +1164,10 @@ static int rk_iommu_probe(struct platform_device *pdev)
        if (iommu->num_mmu == 0)
                return PTR_ERR(iommu->bases[0]);
 
+       iommu->num_irq = platform_irq_count(pdev);
+       if (iommu->num_irq < 0)
+               return iommu->num_irq;
+
        iommu->reset_disabled = device_property_read_bool(dev,
                                        "rockchip,disable-mmu-reset");
 
@@ -1219,8 +1224,9 @@ static int rk_iommu_probe(struct platform_device *pdev)
 
        pm_runtime_enable(dev);
 
-       i = 0;
-       while ((irq = platform_get_irq(pdev, i++)) != -ENXIO) {
+       for (i = 0; i < iommu->num_irq; i++) {
+               int irq = platform_get_irq(pdev, i);
+
                if (irq < 0)
                        return irq;
 
@@ -1245,10 +1251,13 @@ err_unprepare_clocks:
 static void rk_iommu_shutdown(struct platform_device *pdev)
 {
        struct rk_iommu *iommu = platform_get_drvdata(pdev);
-       int i = 0, irq;
+       int i;
+
+       for (i = 0; i < iommu->num_irq; i++) {
+               int irq = platform_get_irq(pdev, i);
 
-       while ((irq = platform_get_irq(pdev, i++)) != -ENXIO)
                devm_free_irq(iommu->dev, irq, iommu);
+       }
 
        pm_runtime_force_suspend(&pdev->dev);
 }
index 1a57cee3efab165591d3933ebda9711164e2c51c..0b0a737397562472636525b88a1fd3559b6dafc2 100644 (file)
@@ -15,6 +15,7 @@
 
 /* FIC Registers */
 #define AL_FIC_CAUSE           0x00
+#define AL_FIC_SET_CAUSE       0x08
 #define AL_FIC_MASK            0x10
 #define AL_FIC_CONTROL         0x28
 
@@ -126,6 +127,16 @@ static void al_fic_irq_handler(struct irq_desc *desc)
        chained_irq_exit(irqchip, desc);
 }
 
+static int al_fic_irq_retrigger(struct irq_data *data)
+{
+       struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
+       struct al_fic *fic = gc->private;
+
+       writel_relaxed(BIT(data->hwirq), fic->base + AL_FIC_SET_CAUSE);
+
+       return 1;
+}
+
 static int al_fic_register(struct device_node *node,
                           struct al_fic *fic)
 {
@@ -159,6 +170,7 @@ static int al_fic_register(struct device_node *node,
        gc->chip_types->chip.irq_unmask = irq_gc_mask_clr_bit;
        gc->chip_types->chip.irq_ack = irq_gc_ack_clr_bit;
        gc->chip_types->chip.irq_set_type = al_fic_irq_set_type;
+       gc->chip_types->chip.irq_retrigger = al_fic_irq_retrigger;
        gc->chip_types->chip.flags = IRQCHIP_SKIP_SET_WAKE;
        gc->private = fic;
 
index 6acad2ea0fb3565a2604ecbb3fb5235395d62fad..29333497ba10d2707e430e333e0c7d85e132d3ad 100644 (file)
@@ -313,6 +313,7 @@ static void __init sama5d3_aic_irq_fixup(void)
 static const struct of_device_id aic5_irq_fixups[] __initconst = {
        { .compatible = "atmel,sama5d3", .data = sama5d3_aic_irq_fixup },
        { .compatible = "atmel,sama5d4", .data = sama5d3_aic_irq_fixup },
+       { .compatible = "microchip,sam9x60", .data = sama5d3_aic_irq_fixup },
        { /* sentinel */ },
 };
 
@@ -390,3 +391,12 @@ static int __init sama5d4_aic5_of_init(struct device_node *node,
        return aic5_of_init(node, parent, NR_SAMA5D4_IRQS);
 }
 IRQCHIP_DECLARE(sama5d4_aic5, "atmel,sama5d4-aic", sama5d4_aic5_of_init);
+
+#define NR_SAM9X60_IRQS                50
+
+static int __init sam9x60_aic5_of_init(struct device_node *node,
+                                      struct device_node *parent)
+{
+       return aic5_of_init(node, parent, NR_SAM9X60_IRQS);
+}
+IRQCHIP_DECLARE(sam9x60_aic5, "microchip,sam9x60-aic", sam9x60_aic5_of_init);
index 62e54f1a248ba002942bc0bae0c6b97d52f9f857..787e8eec9a7f1862275587f079d2809715baeeec 100644 (file)
@@ -175,6 +175,22 @@ static DEFINE_IDA(its_vpeid_ida);
 #define gic_data_rdist_rd_base()       (gic_data_rdist()->rd_base)
 #define gic_data_rdist_vlpi_base()     (gic_data_rdist_rd_base() + SZ_128K)
 
+static u16 get_its_list(struct its_vm *vm)
+{
+       struct its_node *its;
+       unsigned long its_list = 0;
+
+       list_for_each_entry(its, &its_nodes, entry) {
+               if (!its->is_v4)
+                       continue;
+
+               if (vm->vlpi_count[its->list_nr])
+                       __set_bit(its->list_nr, &its_list);
+       }
+
+       return (u16)its_list;
+}
+
 static struct its_collection *dev_event_to_col(struct its_device *its_dev,
                                               u32 event)
 {
@@ -976,17 +992,15 @@ static void its_send_vmapp(struct its_node *its,
 
 static void its_send_vmovp(struct its_vpe *vpe)
 {
-       struct its_cmd_desc desc;
+       struct its_cmd_desc desc = {};
        struct its_node *its;
        unsigned long flags;
        int col_id = vpe->col_idx;
 
        desc.its_vmovp_cmd.vpe = vpe;
-       desc.its_vmovp_cmd.its_list = (u16)its_list_map;
 
        if (!its_list_map) {
                its = list_first_entry(&its_nodes, struct its_node, entry);
-               desc.its_vmovp_cmd.seq_num = 0;
                desc.its_vmovp_cmd.col = &its->collections[col_id];
                its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
                return;
@@ -1003,6 +1017,7 @@ static void its_send_vmovp(struct its_vpe *vpe)
        raw_spin_lock_irqsave(&vmovp_lock, flags);
 
        desc.its_vmovp_cmd.seq_num = vmovp_seq_num++;
+       desc.its_vmovp_cmd.its_list = get_its_list(vpe->its_vm);
 
        /* Emit VMOVPs */
        list_for_each_entry(its, &its_nodes, entry) {
index 422664ac5f533b4c69f15933059945eb0c59994e..1edc99335a946dbbac46ce86b5db3d93095b455f 100644 (file)
@@ -59,7 +59,7 @@ static struct gic_chip_data gic_data __read_mostly;
 static DEFINE_STATIC_KEY_TRUE(supports_deactivate_key);
 
 #define GIC_ID_NR      (1U << GICD_TYPER_ID_BITS(gic_data.rdists.gicd_typer))
-#define GIC_LINE_NR    max(GICD_TYPER_SPIS(gic_data.rdists.gicd_typer), 1020U)
+#define GIC_LINE_NR    min(GICD_TYPER_SPIS(gic_data.rdists.gicd_typer), 1020U)
 #define GIC_ESPI_NR    GICD_TYPER_ESPIS(gic_data.rdists.gicd_typer)
 
 /*
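
The GIC_LINE_NR change swaps max() for min(): the SPI count derived from GICD_TYPER has to be clamped down to the architectural ceiling of 1020, and max() could never yield a value below that ceiling. A tiny standalone illustration with a hypothetical SPI count:

    #include <stdio.h>

    #define MIN(a, b) ((a) < (b) ? (a) : (b))
    #define MAX(a, b) ((a) > (b) ? (a) : (b))

    int main(void)
    {
            unsigned int spis = 992; /* hypothetical GICD_TYPER-derived count */

            printf("max()=%u (too many), min()=%u (clamped)\n",
                   MAX(spis, 1020u), MIN(spis, 1020u));
            return 0; /* prints: max()=1020 (too many), min()=992 (clamped) */
    }
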
index c72c036aea768001a19d5d702f490f005f6636cd..7d0a12fe2714a72979cf56281be5a78fa644064a 100644 (file)
@@ -97,7 +97,7 @@ static inline void plic_irq_toggle(const struct cpumask *mask,
        }
 }
 
-static void plic_irq_enable(struct irq_data *d)
+static void plic_irq_unmask(struct irq_data *d)
 {
        unsigned int cpu = cpumask_any_and(irq_data_get_affinity_mask(d),
                                           cpu_online_mask);
@@ -106,7 +106,7 @@ static void plic_irq_enable(struct irq_data *d)
        plic_irq_toggle(cpumask_of(cpu), d->hwirq, 1);
 }
 
-static void plic_irq_disable(struct irq_data *d)
+static void plic_irq_mask(struct irq_data *d)
 {
        plic_irq_toggle(cpu_possible_mask, d->hwirq, 0);
 }
@@ -125,10 +125,8 @@ static int plic_set_affinity(struct irq_data *d,
        if (cpu >= nr_cpu_ids)
                return -EINVAL;
 
-       if (!irqd_irq_disabled(d)) {
-               plic_irq_toggle(cpu_possible_mask, d->hwirq, 0);
-               plic_irq_toggle(cpumask_of(cpu), d->hwirq, 1);
-       }
+       plic_irq_toggle(cpu_possible_mask, d->hwirq, 0);
+       plic_irq_toggle(cpumask_of(cpu), d->hwirq, 1);
 
        irq_data_update_effective_affinity(d, cpumask_of(cpu));
 
@@ -136,14 +134,18 @@ static int plic_set_affinity(struct irq_data *d,
 }
 #endif
 
+static void plic_irq_eoi(struct irq_data *d)
+{
+       struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
+
+       writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM);
+}
+
 static struct irq_chip plic_chip = {
        .name           = "SiFive PLIC",
-       /*
-        * There is no need to mask/unmask PLIC interrupts.  They are "masked"
-        * by reading claim and "unmasked" when writing it back.
-        */
-       .irq_enable     = plic_irq_enable,
-       .irq_disable    = plic_irq_disable,
+       .irq_mask       = plic_irq_mask,
+       .irq_unmask     = plic_irq_unmask,
+       .irq_eoi        = plic_irq_eoi,
 #ifdef CONFIG_SMP
        .irq_set_affinity = plic_set_affinity,
 #endif
@@ -152,7 +154,7 @@ static struct irq_chip plic_chip = {
 static int plic_irqdomain_map(struct irq_domain *d, unsigned int irq,
                              irq_hw_number_t hwirq)
 {
-       irq_set_chip_and_handler(irq, &plic_chip, handle_simple_irq);
+       irq_set_chip_and_handler(irq, &plic_chip, handle_fasteoi_irq);
        irq_set_chip_data(irq, NULL);
        irq_set_noprobe(irq);
        return 0;
@@ -188,7 +190,6 @@ static void plic_handle_irq(struct pt_regs *regs)
                                        hwirq);
                else
                        generic_handle_irq(irq);
-               writel(hwirq, claim);
        }
        csr_set(sie, SIE_SEIE);
 }
@@ -251,8 +252,8 @@ static int __init plic_init(struct device_node *node,
                        continue;
                }
 
-               /* skip context holes */
-               if (parent.args[0] == -1)
+               /* skip contexts other than supervisor external interrupt */
+               if (parent.args[0] != IRQ_S_EXT)
                        continue;
 
                hartid = plic_find_hart_id(parent.np);
index c92b405b7646cfbceaed3b6544fb71d145c9bb42..ba8619524231ccde29093076e6196652d17dd69b 100644 (file)
@@ -744,7 +744,7 @@ capi_poll(struct file *file, poll_table *wait)
 
        poll_wait(file, &(cdev->recvwait), wait);
        mask = EPOLLOUT | EPOLLWRNORM;
-       if (!skb_queue_empty(&cdev->recvqueue))
+       if (!skb_queue_empty_lockless(&cdev->recvqueue))
                mask |= EPOLLIN | EPOLLRDNORM;
        return mask;
 }
index 705c6200814b633abf9794b044f3a7b97aa7d269..7b726f00f1834d1cd4aa544090215ad9562e4c08 100644 (file)
@@ -18,7 +18,7 @@
 
 static int clamped;
 static struct wf_control *clamp_control;
-static struct dev_pm_qos_request qos_req;
+static struct freq_qos_request qos_req;
 static unsigned int min_freq, max_freq;
 
 static int clamp_set(struct wf_control *ct, s32 value)
@@ -35,7 +35,7 @@ static int clamp_set(struct wf_control *ct, s32 value)
        }
        clamped = value;
 
-       return dev_pm_qos_update_request(&qos_req, freq);
+       return freq_qos_update_request(&qos_req, freq);
 }
 
 static int clamp_get(struct wf_control *ct, s32 *value)
@@ -77,38 +77,44 @@ static int __init wf_cpufreq_clamp_init(void)
 
        min_freq = policy->cpuinfo.min_freq;
        max_freq = policy->cpuinfo.max_freq;
+
+       ret = freq_qos_add_request(&policy->constraints, &qos_req, FREQ_QOS_MAX,
+                                  max_freq);
+
        cpufreq_cpu_put(policy);
 
+       if (ret < 0) {
+               pr_err("%s: Failed to add freq constraint (%d)\n", __func__,
+                      ret);
+               return ret;
+       }
+
        dev = get_cpu_device(0);
        if (unlikely(!dev)) {
                pr_warn("%s: No cpu device for cpu0\n", __func__);
-               return -ENODEV;
+               ret = -ENODEV;
+               goto fail;
        }
 
        clamp = kmalloc(sizeof(struct wf_control), GFP_KERNEL);
-       if (clamp == NULL)
-               return -ENOMEM;
-
-       ret = dev_pm_qos_add_request(dev, &qos_req, DEV_PM_QOS_MAX_FREQUENCY,
-                                    max_freq);
-       if (ret < 0) {
-               pr_err("%s: Failed to add freq constraint (%d)\n", __func__,
-                      ret);
-               goto free;
+       if (clamp == NULL) {
+               ret = -ENOMEM;
+               goto fail;
        }
 
        clamp->ops = &clamp_ops;
        clamp->name = "cpufreq-clamp";
        ret = wf_register_control(clamp);
        if (ret)
-               goto fail;
+               goto free;
+
        clamp_control = clamp;
        return 0;
- fail:
-       dev_pm_qos_remove_request(&qos_req);
 
  free:
        kfree(clamp);
+ fail:
+       freq_qos_remove_request(&qos_req);
        return ret;
 }
 
@@ -116,7 +122,7 @@ static void __exit wf_cpufreq_clamp_exit(void)
 {
        if (clamp_control) {
                wf_unregister_control(clamp_control);
-               dev_pm_qos_remove_request(&qos_req);
+               freq_qos_remove_request(&qos_req);
        }
 }
 
index d249cf8ac277e4246d8f398c8aa3b2e46fe90724..8346e6d1816c01bb09cb161ab026f970b520f9e9 100644 (file)
@@ -542,7 +542,7 @@ static void wake_migration_worker(struct cache *cache)
 
 static struct dm_bio_prison_cell_v2 *alloc_prison_cell(struct cache *cache)
 {
-       return dm_bio_prison_alloc_cell_v2(cache->prison, GFP_NOWAIT);
+       return dm_bio_prison_alloc_cell_v2(cache->prison, GFP_NOIO);
 }
 
 static void free_prison_cell(struct cache *cache, struct dm_bio_prison_cell_v2 *cell)
@@ -554,9 +554,7 @@ static struct dm_cache_migration *alloc_migration(struct cache *cache)
 {
        struct dm_cache_migration *mg;
 
-       mg = mempool_alloc(&cache->migration_pool, GFP_NOWAIT);
-       if (!mg)
-               return NULL;
+       mg = mempool_alloc(&cache->migration_pool, GFP_NOIO);
 
        memset(mg, 0, sizeof(*mg));
 
@@ -664,10 +662,6 @@ static bool bio_detain_shared(struct cache *cache, dm_oblock_t oblock, struct bi
        struct dm_bio_prison_cell_v2 *cell_prealloc, *cell;
 
        cell_prealloc = alloc_prison_cell(cache); /* FIXME: allow wait if calling from worker */
-       if (!cell_prealloc) {
-               defer_bio(cache, bio);
-               return false;
-       }
 
        build_key(oblock, end, &key);
        r = dm_cell_get_v2(cache->prison, &key, lock_level(bio), bio, cell_prealloc, &cell);
@@ -1493,11 +1487,6 @@ static int mg_lock_writes(struct dm_cache_migration *mg)
        struct dm_bio_prison_cell_v2 *prealloc;
 
        prealloc = alloc_prison_cell(cache);
-       if (!prealloc) {
-               DMERR_LIMIT("%s: alloc_prison_cell failed", cache_device_name(cache));
-               mg_complete(mg, false);
-               return -ENOMEM;
-       }
 
        /*
         * Prevent writes to the block, but allow reads to continue.
@@ -1535,11 +1524,6 @@ static int mg_start(struct cache *cache, struct policy_work *op, struct bio *bio
        }
 
        mg = alloc_migration(cache);
-       if (!mg) {
-               policy_complete_background_work(cache->policy, op, false);
-               background_work_end(cache);
-               return -ENOMEM;
-       }
 
        mg->op = op;
        mg->overwrite_bio = bio;
@@ -1628,10 +1612,6 @@ static int invalidate_lock(struct dm_cache_migration *mg)
        struct dm_bio_prison_cell_v2 *prealloc;
 
        prealloc = alloc_prison_cell(cache);
-       if (!prealloc) {
-               invalidate_complete(mg, false);
-               return -ENOMEM;
-       }
 
        build_key(mg->invalidate_oblock, oblock_succ(mg->invalidate_oblock), &key);
        r = dm_cell_lock_v2(cache->prison, &key,
@@ -1669,10 +1649,6 @@ static int invalidate_start(struct cache *cache, dm_cblock_t cblock,
                return -EPERM;
 
        mg = alloc_migration(cache);
-       if (!mg) {
-               background_work_end(cache);
-               return -ENOMEM;
-       }
 
        mg->overwrite_bio = bio;
        mg->invalidate_cblock = cblock;
index cd6f9e9fc98e56112c533a45980478bab02d9777..4ca8f19772224a9bc3c3cdc76b1fbe2a7774d20f 100644 (file)
@@ -591,8 +591,8 @@ static struct hash_table_bucket *get_hash_table_bucket(struct clone *clone,
  *
  * NOTE: Must be called with the bucket lock held
  */
-struct dm_clone_region_hydration *__hash_find(struct hash_table_bucket *bucket,
-                                             unsigned long region_nr)
+static struct dm_clone_region_hydration *__hash_find(struct hash_table_bucket *bucket,
+                                                    unsigned long region_nr)
 {
        struct dm_clone_region_hydration *hd;
 
index f150f5c5492b9fe3fdf02e3e5f24dd120fd609fc..4fb1a40e68a08748b94069802c1d8f666bcb7566 100644 (file)
@@ -18,7 +18,6 @@
 #include <linux/vmalloc.h>
 #include <linux/log2.h>
 #include <linux/dm-kcopyd.h>
-#include <linux/semaphore.h>
 
 #include "dm.h"
 
@@ -107,8 +106,8 @@ struct dm_snapshot {
        /* The on disk metadata handler */
        struct dm_exception_store *store;
 
-       /* Maximum number of in-flight COW jobs. */
-       struct semaphore cow_count;
+       unsigned in_progress;
+       struct wait_queue_head in_progress_wait;
 
        struct dm_kcopyd_client *kcopyd_client;
 
@@ -162,8 +161,8 @@ struct dm_snapshot {
  */
 #define DEFAULT_COW_THRESHOLD 2048
 
-static int cow_threshold = DEFAULT_COW_THRESHOLD;
-module_param_named(snapshot_cow_threshold, cow_threshold, int, 0644);
+static unsigned cow_threshold = DEFAULT_COW_THRESHOLD;
+module_param_named(snapshot_cow_threshold, cow_threshold, uint, 0644);
 MODULE_PARM_DESC(snapshot_cow_threshold, "Maximum number of chunks being copied on write");
 
 DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
@@ -1327,7 +1326,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
                goto bad_hash_tables;
        }
 
-       sema_init(&s->cow_count, (cow_threshold > 0) ? cow_threshold : INT_MAX);
+       init_waitqueue_head(&s->in_progress_wait);
 
        s->kcopyd_client = dm_kcopyd_client_create(&dm_kcopyd_throttle);
        if (IS_ERR(s->kcopyd_client)) {
@@ -1509,9 +1508,56 @@ static void snapshot_dtr(struct dm_target *ti)
 
        dm_put_device(ti, s->origin);
 
+       WARN_ON(s->in_progress);
+
        kfree(s);
 }
 
+static void account_start_copy(struct dm_snapshot *s)
+{
+       spin_lock(&s->in_progress_wait.lock);
+       s->in_progress++;
+       spin_unlock(&s->in_progress_wait.lock);
+}
+
+static void account_end_copy(struct dm_snapshot *s)
+{
+       spin_lock(&s->in_progress_wait.lock);
+       BUG_ON(!s->in_progress);
+       s->in_progress--;
+       if (likely(s->in_progress <= cow_threshold) &&
+           unlikely(waitqueue_active(&s->in_progress_wait)))
+               wake_up_locked(&s->in_progress_wait);
+       spin_unlock(&s->in_progress_wait.lock);
+}
+
+static bool wait_for_in_progress(struct dm_snapshot *s, bool unlock_origins)
+{
+       if (unlikely(s->in_progress > cow_threshold)) {
+               spin_lock(&s->in_progress_wait.lock);
+               if (likely(s->in_progress > cow_threshold)) {
+                       /*
+                        * NOTE: this throttle doesn't account for whether
+                        * the caller is servicing an IO that will trigger a COW
+                        * so excess throttling may result for chunks not required
+                        * to be COW'd.  But if cow_threshold was reached, extra
+                        * throttling is unlikely to negatively impact performance.
+                        */
+                       DECLARE_WAITQUEUE(wait, current);
+                       __add_wait_queue(&s->in_progress_wait, &wait);
+                       __set_current_state(TASK_UNINTERRUPTIBLE);
+                       spin_unlock(&s->in_progress_wait.lock);
+                       if (unlock_origins)
+                               up_read(&_origins_lock);
+                       io_schedule();
+                       remove_wait_queue(&s->in_progress_wait, &wait);
+                       return false;
+               }
+               spin_unlock(&s->in_progress_wait.lock);
+       }
+       return true;
+}
+
 /*
  * Flush a list of buffers.
  */
@@ -1527,7 +1573,7 @@ static void flush_bios(struct bio *bio)
        }
 }
 
-static int do_origin(struct dm_dev *origin, struct bio *bio);
+static int do_origin(struct dm_dev *origin, struct bio *bio, bool limit);
 
 /*
  * Flush a list of buffers.
@@ -1540,7 +1586,7 @@ static void retry_origin_bios(struct dm_snapshot *s, struct bio *bio)
        while (bio) {
                n = bio->bi_next;
                bio->bi_next = NULL;
-               r = do_origin(s->origin, bio);
+               r = do_origin(s->origin, bio, false);
                if (r == DM_MAPIO_REMAPPED)
                        generic_make_request(bio);
                bio = n;
@@ -1732,7 +1778,7 @@ static void copy_callback(int read_err, unsigned long write_err, void *context)
                rb_link_node(&pe->out_of_order_node, parent, p);
                rb_insert_color(&pe->out_of_order_node, &s->out_of_order_tree);
        }
-       up(&s->cow_count);
+       account_end_copy(s);
 }
 
 /*
@@ -1756,7 +1802,7 @@ static void start_copy(struct dm_snap_pending_exception *pe)
        dest.count = src.count;
 
        /* Hand over to kcopyd */
-       down(&s->cow_count);
+       account_start_copy(s);
        dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, copy_callback, pe);
 }
 
@@ -1776,7 +1822,7 @@ static void start_full_bio(struct dm_snap_pending_exception *pe,
        pe->full_bio = bio;
        pe->full_bio_end_io = bio->bi_end_io;
 
-       down(&s->cow_count);
+       account_start_copy(s);
        callback_data = dm_kcopyd_prepare_callback(s->kcopyd_client,
                                                   copy_callback, pe);
 
@@ -1866,7 +1912,7 @@ static void zero_callback(int read_err, unsigned long write_err, void *context)
        struct bio *bio = context;
        struct dm_snapshot *s = bio->bi_private;
 
-       up(&s->cow_count);
+       account_end_copy(s);
        bio->bi_status = write_err ? BLK_STS_IOERR : 0;
        bio_endio(bio);
 }
@@ -1880,7 +1926,7 @@ static void zero_exception(struct dm_snapshot *s, struct dm_exception *e,
        dest.sector = bio->bi_iter.bi_sector;
        dest.count = s->store->chunk_size;
 
-       down(&s->cow_count);
+       account_start_copy(s);
        WARN_ON_ONCE(bio->bi_private);
        bio->bi_private = s;
        dm_kcopyd_zero(s->kcopyd_client, 1, &dest, 0, zero_callback, bio);
@@ -1916,6 +1962,11 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
        if (!s->valid)
                return DM_MAPIO_KILL;
 
+       if (bio_data_dir(bio) == WRITE) {
+               while (unlikely(!wait_for_in_progress(s, false)))
+                       ; /* wait_for_in_progress() has slept */
+       }
+
        down_read(&s->lock);
        dm_exception_table_lock(&lock);
 
@@ -2112,7 +2163,7 @@ redirect_to_origin:
 
        if (bio_data_dir(bio) == WRITE) {
                up_write(&s->lock);
-               return do_origin(s->origin, bio);
+               return do_origin(s->origin, bio, false);
        }
 
 out_unlock:
@@ -2487,15 +2538,24 @@ next_snapshot:
 /*
  * Called on a write from the origin driver.
  */
-static int do_origin(struct dm_dev *origin, struct bio *bio)
+static int do_origin(struct dm_dev *origin, struct bio *bio, bool limit)
 {
        struct origin *o;
        int r = DM_MAPIO_REMAPPED;
 
+again:
        down_read(&_origins_lock);
        o = __lookup_origin(origin->bdev);
-       if (o)
+       if (o) {
+               if (limit) {
+                       struct dm_snapshot *s;
+                       list_for_each_entry(s, &o->snapshots, list)
+                               if (unlikely(!wait_for_in_progress(s, true)))
+                                       goto again;
+               }
+
                r = __origin_write(&o->snapshots, bio->bi_iter.bi_sector, bio);
+       }
        up_read(&_origins_lock);
 
        return r;
@@ -2608,7 +2668,7 @@ static int origin_map(struct dm_target *ti, struct bio *bio)
                dm_accept_partial_bio(bio, available_sectors);
 
        /* Only tell snapshots if this is a write */
-       return do_origin(o->dev, bio);
+       return do_origin(o->dev, bio, true);
 }
 
 /*
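
The dm-snap changes above replace the cow_count semaphore with an explicit in_progress counter and a waitqueue, so a writer can drop _origins_lock before it sleeps on the throttle. A rough userspace analogy of that throttle, using a pthread mutex and condition variable rather than the kernel waitqueue API (all names and the threshold value are only for the sketch):

    #include <pthread.h>
    #include <stdio.h>

    #define COW_THRESHOLD 2048

    static unsigned int in_progress;
    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t wq = PTHREAD_COND_INITIALIZER;

    static void account_start_copy(void)
    {
            pthread_mutex_lock(&lock);
            in_progress++;
            pthread_mutex_unlock(&lock);
    }

    static void account_end_copy(void)
    {
            pthread_mutex_lock(&lock);
            in_progress--;
            if (in_progress <= COW_THRESHOLD)
                    pthread_cond_broadcast(&wq);   /* wake throttled writers */
            pthread_mutex_unlock(&lock);
    }

    static void wait_for_in_progress(void)
    {
            pthread_mutex_lock(&lock);
            while (in_progress > COW_THRESHOLD)
                    pthread_cond_wait(&wq, &lock); /* sleep until below threshold */
            pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
            account_start_copy();
            wait_for_in_progress();  /* returns immediately: 1 <= threshold */
            account_end_copy();
            printf("in_progress=%u\n", in_progress);
            return 0;
    }
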
index f61693e596845aebafbbedb8f940e0eaec9b0bee..1e772287b1c8e7fa2ec54724c12d69526fbc57fa 100644 (file)
@@ -154,7 +154,7 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
        } else {
                pr_err("md/raid0:%s: cannot assemble multi-zone RAID0 with default_layout setting\n",
                       mdname(mddev));
-               pr_err("md/raid0: please set raid.default_layout to 1 or 2\n");
+               pr_err("md/raid0: please set raid0.default_layout to 1 or 2\n");
                err = -ENOTSUPP;
                goto abort;
        }
index cfca3c70599ba34fffa567c78a866c6116655781..21f90a8874852b955a84ef83b6e12f535e149b3b 100644 (file)
@@ -643,8 +643,7 @@ static int v4l_stk_release(struct file *fp)
                dev->owner = NULL;
        }
 
-       if (is_present(dev))
-               usb_autopm_put_interface(dev->interface);
+       usb_autopm_put_interface(dev->interface);
        mutex_unlock(&dev->lock);
        return v4l2_fh_release(fp);
 }
index 32747425297d840ba4458c6345946b07fec8ba85..64fff6abe60e8394782089931e59e374b68ee5bf 100644 (file)
@@ -941,7 +941,7 @@ static int jmb38x_ms_probe(struct pci_dev *pdev,
        if (!cnt) {
                rc = -ENODEV;
                pci_dev_busy = 1;
-               goto err_out;
+               goto err_out_int;
        }
 
        jm = kzalloc(sizeof(struct jmb38x_ms)
index 310dae26ddff6283a0b83c05cd5c31b73834cf97..b2c325ead1c8f08b594a9da913ec5013e3068abc 100644 (file)
@@ -129,11 +129,27 @@ static int mt6397_irq_resume(struct device *dev)
 static SIMPLE_DEV_PM_OPS(mt6397_pm_ops, mt6397_irq_suspend,
                        mt6397_irq_resume);
 
+struct chip_data {
+       u32 cid_addr;
+       u32 cid_shift;
+};
+
+static const struct chip_data mt6323_core = {
+       .cid_addr = MT6323_CID,
+       .cid_shift = 0,
+};
+
+static const struct chip_data mt6397_core = {
+       .cid_addr = MT6397_CID,
+       .cid_shift = 0,
+};
+
 static int mt6397_probe(struct platform_device *pdev)
 {
        int ret;
        unsigned int id;
        struct mt6397_chip *pmic;
+       const struct chip_data *pmic_core;
 
        pmic = devm_kzalloc(&pdev->dev, sizeof(*pmic), GFP_KERNEL);
        if (!pmic)
@@ -149,28 +165,30 @@ static int mt6397_probe(struct platform_device *pdev)
        if (!pmic->regmap)
                return -ENODEV;
 
-       platform_set_drvdata(pdev, pmic);
+       pmic_core = of_device_get_match_data(&pdev->dev);
+       if (!pmic_core)
+               return -ENODEV;
 
-       ret = regmap_read(pmic->regmap, MT6397_CID, &id);
+       ret = regmap_read(pmic->regmap, pmic_core->cid_addr, &id);
        if (ret) {
-               dev_err(pmic->dev, "Failed to read chip id: %d\n", ret);
+               dev_err(&pdev->dev, "Failed to read chip id: %d\n", ret);
                return ret;
        }
 
+       pmic->chip_id = (id >> pmic_core->cid_shift) & 0xff;
+
+       platform_set_drvdata(pdev, pmic);
+
        pmic->irq = platform_get_irq(pdev, 0);
        if (pmic->irq <= 0)
                return pmic->irq;
 
-       switch (id & 0xff) {
-       case MT6323_CHIP_ID:
-               pmic->int_con[0] = MT6323_INT_CON0;
-               pmic->int_con[1] = MT6323_INT_CON1;
-               pmic->int_status[0] = MT6323_INT_STATUS0;
-               pmic->int_status[1] = MT6323_INT_STATUS1;
-               ret = mt6397_irq_init(pmic);
-               if (ret)
-                       return ret;
+       ret = mt6397_irq_init(pmic);
+       if (ret)
+               return ret;
 
+       switch (pmic->chip_id) {
+       case MT6323_CHIP_ID:
                ret = devm_mfd_add_devices(&pdev->dev, -1, mt6323_devs,
                                           ARRAY_SIZE(mt6323_devs), NULL,
                                           0, pmic->irq_domain);
@@ -178,21 +196,13 @@ static int mt6397_probe(struct platform_device *pdev)
 
        case MT6391_CHIP_ID:
        case MT6397_CHIP_ID:
-               pmic->int_con[0] = MT6397_INT_CON0;
-               pmic->int_con[1] = MT6397_INT_CON1;
-               pmic->int_status[0] = MT6397_INT_STATUS0;
-               pmic->int_status[1] = MT6397_INT_STATUS1;
-               ret = mt6397_irq_init(pmic);
-               if (ret)
-                       return ret;
-
                ret = devm_mfd_add_devices(&pdev->dev, -1, mt6397_devs,
                                           ARRAY_SIZE(mt6397_devs), NULL,
                                           0, pmic->irq_domain);
                break;
 
        default:
-               dev_err(&pdev->dev, "unsupported chip: %d\n", id);
+               dev_err(&pdev->dev, "unsupported chip: %d\n", pmic->chip_id);
                return -ENODEV;
        }
 
@@ -205,9 +215,15 @@ static int mt6397_probe(struct platform_device *pdev)
 }
 
 static const struct of_device_id mt6397_of_match[] = {
-       { .compatible = "mediatek,mt6397" },
-       { .compatible = "mediatek,mt6323" },
-       { }
+       {
+               .compatible = "mediatek,mt6323",
+               .data = &mt6323_core,
+       }, {
+               .compatible = "mediatek,mt6397",
+               .data = &mt6397_core,
+       }, {
+               /* sentinel */
+       }
 };
 MODULE_DEVICE_TABLE(of, mt6397_of_match);
 
index 47ae84afac2e1ee39e93d771f60ebdb49bb78092..1b1a794d639d0d8518e1bbb351966aee8ee74b23 100644 (file)
@@ -527,6 +527,7 @@ static int fastrpc_dma_buf_attach(struct dma_buf *dmabuf,
                              FASTRPC_PHYS(buffer->phys), buffer->size);
        if (ret < 0) {
                dev_err(buffer->dev, "failed to get scatterlist from DMA API\n");
+               kfree(a);
                return -EINVAL;
        }
 
index 32e9b1aed2ca5570511fca78d6fcc93aa9abaca0..0a2b99e1af45f5200d8fc6e708807693ff53abe1 100644 (file)
@@ -218,13 +218,21 @@ static void mei_mkhi_fix(struct mei_cl_device *cldev)
 {
        int ret;
 
+       /* No need to enable the client if nothing is needed from it */
+       if (!cldev->bus->fw_f_fw_ver_supported &&
+           !cldev->bus->hbm_f_os_supported)
+               return;
+
        ret = mei_cldev_enable(cldev);
        if (ret)
                return;
 
-       ret = mei_fwver(cldev);
-       if (ret < 0)
-               dev_err(&cldev->dev, "FW version command failed %d\n", ret);
+       if (cldev->bus->fw_f_fw_ver_supported) {
+               ret = mei_fwver(cldev);
+               if (ret < 0)
+                       dev_err(&cldev->dev, "FW version command failed %d\n",
+                               ret);
+       }
 
        if (cldev->bus->hbm_f_os_supported) {
                ret = mei_osver(cldev);
index 77f7dff7098d4633f4e37e691027fd3c36c1d7a6..c09f8bb49495ff962daf96a83bcb6cb83ee47f27 100644 (file)
@@ -79,6 +79,9 @@
 #define MEI_DEV_ID_CNP_H      0xA360  /* Cannon Point H */
 #define MEI_DEV_ID_CNP_H_4    0xA364  /* Cannon Point H 4 (iTouch) */
 
+#define MEI_DEV_ID_CMP_LP     0x02e0  /* Comet Point LP */
+#define MEI_DEV_ID_CMP_LP_3   0x02e4  /* Comet Point LP 3 (iTouch) */
+
 #define MEI_DEV_ID_ICP_LP     0x34E0  /* Ice Lake Point LP */
 
 #define MEI_DEV_ID_TGP_LP     0xA0E0  /* Tiger Lake Point LP */
index abe1b1f4362f14a196f76c2408303630832e1a3e..c4f6991d30289b368c4b51aa6348134acb31731f 100644 (file)
@@ -1355,6 +1355,8 @@ static bool mei_me_fw_type_sps(struct pci_dev *pdev)
 #define MEI_CFG_FW_SPS                           \
        .quirk_probe = mei_me_fw_type_sps
 
+#define MEI_CFG_FW_VER_SUPP                     \
+       .fw_ver_supported = 1
 
 #define MEI_CFG_ICH_HFS                      \
        .fw_status.count = 0
@@ -1392,31 +1394,41 @@ static const struct mei_cfg mei_me_ich10_cfg = {
        MEI_CFG_ICH10_HFS,
 };
 
-/* PCH devices */
-static const struct mei_cfg mei_me_pch_cfg = {
+/* PCH6 devices */
+static const struct mei_cfg mei_me_pch6_cfg = {
        MEI_CFG_PCH_HFS,
 };
 
+/* PCH7 devices */
+static const struct mei_cfg mei_me_pch7_cfg = {
+       MEI_CFG_PCH_HFS,
+       MEI_CFG_FW_VER_SUPP,
+};
+
 /* PCH Cougar Point and Patsburg with quirk for Node Manager exclusion */
 static const struct mei_cfg mei_me_pch_cpt_pbg_cfg = {
        MEI_CFG_PCH_HFS,
+       MEI_CFG_FW_VER_SUPP,
        MEI_CFG_FW_NM,
 };
 
 /* PCH8 Lynx Point and newer devices */
 static const struct mei_cfg mei_me_pch8_cfg = {
        MEI_CFG_PCH8_HFS,
+       MEI_CFG_FW_VER_SUPP,
 };
 
 /* PCH8 Lynx Point with quirk for SPS Firmware exclusion */
 static const struct mei_cfg mei_me_pch8_sps_cfg = {
        MEI_CFG_PCH8_HFS,
+       MEI_CFG_FW_VER_SUPP,
        MEI_CFG_FW_SPS,
 };
 
 /* Cannon Lake and newer devices */
 static const struct mei_cfg mei_me_pch12_cfg = {
        MEI_CFG_PCH8_HFS,
+       MEI_CFG_FW_VER_SUPP,
        MEI_CFG_DMA_128,
 };
 
@@ -1428,7 +1440,8 @@ static const struct mei_cfg *const mei_cfg_list[] = {
        [MEI_ME_UNDEF_CFG] = NULL,
        [MEI_ME_ICH_CFG] = &mei_me_ich_cfg,
        [MEI_ME_ICH10_CFG] = &mei_me_ich10_cfg,
-       [MEI_ME_PCH_CFG] = &mei_me_pch_cfg,
+       [MEI_ME_PCH6_CFG] = &mei_me_pch6_cfg,
+       [MEI_ME_PCH7_CFG] = &mei_me_pch7_cfg,
        [MEI_ME_PCH_CPT_PBG_CFG] = &mei_me_pch_cpt_pbg_cfg,
        [MEI_ME_PCH8_CFG] = &mei_me_pch8_cfg,
        [MEI_ME_PCH8_SPS_CFG] = &mei_me_pch8_sps_cfg,
@@ -1473,6 +1486,8 @@ struct mei_device *mei_me_dev_init(struct pci_dev *pdev,
        mei_device_init(dev, &pdev->dev, &mei_me_hw_ops);
        hw->cfg = cfg;
 
+       dev->fw_f_fw_ver_supported = cfg->fw_ver_supported;
+
        return dev;
 }
 
index 08c84a0de4a8dbca3a097a4f2691002eda43d1f5..1d8794828cbc29cd05d7c7ff32eb3e51a9b40fc9 100644 (file)
  * @fw_status: FW status
  * @quirk_probe: device exclusion quirk
  * @dma_size: device DMA buffers size
+ * @fw_ver_supported: is fw version retrievable from FW
  */
 struct mei_cfg {
        const struct mei_fw_status fw_status;
        bool (*quirk_probe)(struct pci_dev *pdev);
        size_t dma_size[DMA_DSCR_NUM];
+       u32 fw_ver_supported:1;
 };
 
 
@@ -62,7 +64,8 @@ struct mei_me_hw {
  * @MEI_ME_UNDEF_CFG:      Lower sentinel.
  * @MEI_ME_ICH_CFG:        I/O Controller Hub legacy devices.
  * @MEI_ME_ICH10_CFG:      I/O Controller Hub platforms Gen10
- * @MEI_ME_PCH_CFG:        Platform Controller Hub platforms (Up to Gen8).
+ * @MEI_ME_PCH6_CFG:       Platform Controller Hub platforms (Gen6).
+ * @MEI_ME_PCH7_CFG:       Platform Controller Hub platforms (Gen7).
  * @MEI_ME_PCH_CPT_PBG_CFG:Platform Controller Hub workstations
  *                         with quirk for Node Manager exclusion.
  * @MEI_ME_PCH8_CFG:       Platform Controller Hub Gen8 and newer
@@ -77,7 +80,8 @@ enum mei_cfg_idx {
        MEI_ME_UNDEF_CFG,
        MEI_ME_ICH_CFG,
        MEI_ME_ICH10_CFG,
-       MEI_ME_PCH_CFG,
+       MEI_ME_PCH6_CFG,
+       MEI_ME_PCH7_CFG,
        MEI_ME_PCH_CPT_PBG_CFG,
        MEI_ME_PCH8_CFG,
        MEI_ME_PCH8_SPS_CFG,
index f71a023aed3c91616b6bb17fe8b5a32ca5a6c9b5..0f2141178299cdbf2697b94429f40c1e47b702fb 100644 (file)
@@ -426,6 +426,8 @@ struct mei_fw_version {
  *
  * @fw_ver : FW versions
  *
+ * @fw_f_fw_ver_supported : fw feature: fw version supported
+ *
  * @me_clients_rwsem: rw lock over me_clients list
  * @me_clients  : list of FW clients
  * @me_clients_map : FW clients bit map
@@ -506,6 +508,8 @@ struct mei_device {
 
        struct mei_fw_version fw_ver[MEI_MAX_FW_VER_BLOCKS];
 
+       unsigned int fw_f_fw_ver_supported:1;
+
        struct rw_semaphore me_clients_rwsem;
        struct list_head me_clients;
        DECLARE_BITMAP(me_clients_map, MEI_CLIENTS_MAX);
index d5a92c6eadb380b0fab2a1b1e83a0d53b799cd68..3dca63eddaa031c5f91b6a0d0bb06c864133887c 100644 (file)
@@ -61,13 +61,13 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
        {MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_3, MEI_ME_ICH10_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_ICH10_4, MEI_ME_ICH10_CFG)},
 
-       {MEI_PCI_DEVICE(MEI_DEV_ID_IBXPK_1, MEI_ME_PCH_CFG)},
-       {MEI_PCI_DEVICE(MEI_DEV_ID_IBXPK_2, MEI_ME_PCH_CFG)},
+       {MEI_PCI_DEVICE(MEI_DEV_ID_IBXPK_1, MEI_ME_PCH6_CFG)},
+       {MEI_PCI_DEVICE(MEI_DEV_ID_IBXPK_2, MEI_ME_PCH6_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_CPT_1, MEI_ME_PCH_CPT_PBG_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_PBG_1, MEI_ME_PCH_CPT_PBG_CFG)},
-       {MEI_PCI_DEVICE(MEI_DEV_ID_PPT_1, MEI_ME_PCH_CFG)},
-       {MEI_PCI_DEVICE(MEI_DEV_ID_PPT_2, MEI_ME_PCH_CFG)},
-       {MEI_PCI_DEVICE(MEI_DEV_ID_PPT_3, MEI_ME_PCH_CFG)},
+       {MEI_PCI_DEVICE(MEI_DEV_ID_PPT_1, MEI_ME_PCH7_CFG)},
+       {MEI_PCI_DEVICE(MEI_DEV_ID_PPT_2, MEI_ME_PCH7_CFG)},
+       {MEI_PCI_DEVICE(MEI_DEV_ID_PPT_3, MEI_ME_PCH7_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_H, MEI_ME_PCH8_SPS_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_W, MEI_ME_PCH8_SPS_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_LP, MEI_ME_PCH8_CFG)},
@@ -96,6 +96,9 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
        {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H, MEI_ME_PCH12_CFG)},
        {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H_4, MEI_ME_PCH8_CFG)},
 
+       {MEI_PCI_DEVICE(MEI_DEV_ID_CMP_LP, MEI_ME_PCH12_CFG)},
+       {MEI_PCI_DEVICE(MEI_DEV_ID_CMP_LP_3, MEI_ME_PCH8_CFG)},
+
        {MEI_PCI_DEVICE(MEI_DEV_ID_ICP_LP, MEI_ME_PCH12_CFG)},
 
        {MEI_PCI_DEVICE(MEI_DEV_ID_TGP_LP, MEI_ME_PCH12_CFG)},
index f7bdae5354c3c48af7bb26d828b83227b057d8bf..5047f7343ffcf41ce3a861ffbb104ac2f6586806 100644 (file)
@@ -611,7 +611,8 @@ static int cqhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
        cq_host->slot[tag].flags = 0;
 
        cq_host->qcnt += 1;
-
+       /* Make sure descriptors are ready before ringing the doorbell */
+       wmb();
        cqhci_writel(cq_host, 1 << tag, CQHCI_TDBR);
        if (!(cqhci_readl(cq_host, CQHCI_TDBR) & (1 << tag)))
                pr_debug("%s: cqhci: doorbell not set for tag %d\n",
index 78e7e350655c7f742970757a4117a8bc00ca8fc5..4031217d21c37be1a847ace9d8369c041ab95561 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/interrupt.h>
 #include <linux/dma-mapping.h>
 #include <linux/dmaengine.h>
+#include <linux/dma/mxs-dma.h>
 #include <linux/highmem.h>
 #include <linux/clk.h>
 #include <linux/err.h>
@@ -266,7 +267,7 @@ static void mxs_mmc_bc(struct mxs_mmc_host *host)
        ssp->ssp_pio_words[2] = cmd1;
        ssp->dma_dir = DMA_NONE;
        ssp->slave_dirn = DMA_TRANS_NONE;
-       desc = mxs_mmc_prep_dma(host, DMA_CTRL_ACK);
+       desc = mxs_mmc_prep_dma(host, MXS_DMA_CTRL_WAIT4END);
        if (!desc)
                goto out;
 
@@ -311,7 +312,7 @@ static void mxs_mmc_ac(struct mxs_mmc_host *host)
        ssp->ssp_pio_words[2] = cmd1;
        ssp->dma_dir = DMA_NONE;
        ssp->slave_dirn = DMA_TRANS_NONE;
-       desc = mxs_mmc_prep_dma(host, DMA_CTRL_ACK);
+       desc = mxs_mmc_prep_dma(host, MXS_DMA_CTRL_WAIT4END);
        if (!desc)
                goto out;
 
@@ -441,7 +442,7 @@ static void mxs_mmc_adtc(struct mxs_mmc_host *host)
        host->data = data;
        ssp->dma_dir = dma_data_dir;
        ssp->slave_dirn = slave_dirn;
-       desc = mxs_mmc_prep_dma(host, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+       desc = mxs_mmc_prep_dma(host, DMA_PREP_INTERRUPT | MXS_DMA_CTRL_WAIT4END);
        if (!desc)
                goto out;
 
index d4ada5cca2d14f6a041e9723b41d5ae8618c718f..234551a68739b65b0ccc3de9b0967ccaff12a3b9 100644 (file)
@@ -646,8 +646,8 @@ int renesas_sdhi_probe(struct platform_device *pdev,
        struct tmio_mmc_dma *dma_priv;
        struct tmio_mmc_host *host;
        struct renesas_sdhi *priv;
+       int num_irqs, irq, ret, i;
        struct resource *res;
-       int irq, ret, i;
        u16 ver;
 
        of_data = of_device_get_match_data(&pdev->dev);
@@ -825,24 +825,31 @@ int renesas_sdhi_probe(struct platform_device *pdev,
                host->hs400_complete = renesas_sdhi_hs400_complete;
        }
 
-       i = 0;
-       while (1) {
+       num_irqs = platform_irq_count(pdev);
+       if (num_irqs < 0) {
+               ret = num_irqs;
+               goto eirq;
+       }
+
+       /* There must be at least one IRQ source */
+       if (!num_irqs) {
+               ret = -ENXIO;
+               goto eirq;
+       }
+
+       for (i = 0; i < num_irqs; i++) {
                irq = platform_get_irq(pdev, i);
-               if (irq < 0)
-                       break;
-               i++;
+               if (irq < 0) {
+                       ret = irq;
+                       goto eirq;
+               }
+
                ret = devm_request_irq(&pdev->dev, irq, tmio_mmc_irq, 0,
                                       dev_name(&pdev->dev), host);
                if (ret)
                        goto eirq;
        }
 
-       /* There must be at least one IRQ source */
-       if (!i) {
-               ret = irq;
-               goto eirq;
-       }
-
        dev_info(&pdev->dev, "%s base at 0x%08lx max clock rate %u MHz\n",
                 mmc_hostname(host->mmc), (unsigned long)
                 (platform_get_resource(pdev, IORESOURCE_MEM, 0)->start),
index 2b9cdcd1dd9dd6ffaa458ebd6f7fdc908a6954cf..f4f5f0a70cda8a1d90b3e3a77ca21f318f34b4d7 100644 (file)
@@ -262,6 +262,7 @@ static const struct sdhci_iproc_data bcm2835_data = {
 };
 
 static const struct sdhci_pltfm_data sdhci_bcm2711_pltfm_data = {
+       .quirks = SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
        .ops = &sdhci_iproc_32only_ops,
 };
 
index 41c2677c587f1677366ed14f65cc50cc3da7da96..083e7e053c95401bcc6e87c6e8d5e3a52605e474 100644 (file)
@@ -372,7 +372,7 @@ static int sdhci_omap_execute_tuning(struct mmc_host *mmc, u32 opcode)
         * on temperature
         */
        if (temperature < -20000)
-               phase_delay = min(max_window + 4 * max_len - 24,
+               phase_delay = min(max_window + 4 * (max_len - 1) - 24,
                                  max_window +
                                  DIV_ROUND_UP(13 * max_len, 16) * 4);
        else if (temperature < 20000)
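
The tuning change above adjusts the cold-temperature cap: reading the surrounding code, if the passing window starts at phase code max_window and holds max_len entries spaced 4 apart, its last valid code is max_window + 4 * (max_len - 1), so the old expression referenced one 4-code step past the end of the window. A quick check with made-up numbers:

    #include <stdio.h>

    int main(void)
    {
            unsigned int max_window = 8, max_len = 10;

            printf("old cap: %u, new cap: %u, window end: %u\n",
                   max_window + 4 * max_len - 24,        /* 24 */
                   max_window + 4 * (max_len - 1) - 24,  /* 20 */
                   max_window + 4 * (max_len - 1));      /* 44 */
            return 0;
    }
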
index 81bd9afb0980525e23658953e03b6e9dcb1e2c33..98c575de43c755ed5772150d1bdbb6538a143332 100644 (file)
@@ -1393,11 +1393,9 @@ static int sh_mmcif_probe(struct platform_device *pdev)
        const char *name;
 
        irq[0] = platform_get_irq(pdev, 0);
-       irq[1] = platform_get_irq(pdev, 1);
-       if (irq[0] < 0) {
-               dev_err(dev, "Get irq error\n");
+       irq[1] = platform_get_irq_optional(pdev, 1);
+       if (irq[0] < 0)
                return -ENXIO;
-       }
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        reg = devm_ioremap_resource(dev, res);
index 97a97a9ccc3618a86730fa3be1d5f56b03797ad5..e10b76089048e06dbfd83a41f05ac14fca01ceb7 100644 (file)
@@ -134,16 +134,15 @@ static void au_write_buf16(struct nand_chip *this, const u_char *buf, int len)
 
 /**
  * au_read_buf16 -  read chip data into buffer
- * @mtd:       MTD device structure
+ * @this:      NAND chip object
  * @buf:       buffer to store date
  * @len:       number of bytes to read
  *
  * read function for 16bit buswidth
  */
-static void au_read_buf16(struct mtd_info *mtd, u_char *buf, int len)
+static void au_read_buf16(struct nand_chip *this, u_char *buf, int len)
 {
        int i;
-       struct nand_chip *this = mtd_to_nand(mtd);
        u16 *p = (u16 *) buf;
        len >>= 1;
 
index 1d8621d43160de3da11a6ac668cd6f2654209807..7acf4a93b5923116f113f89e75f73cb8510c1faf 100644 (file)
@@ -487,7 +487,7 @@ static int write_sr(struct spi_nor *nor, u8 val)
                        SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_WRSR, 1),
                                   SPI_MEM_OP_NO_ADDR,
                                   SPI_MEM_OP_NO_DUMMY,
-                                  SPI_MEM_OP_DATA_IN(1, nor->bouncebuf, 1));
+                                  SPI_MEM_OP_DATA_OUT(1, nor->bouncebuf, 1));
 
                return spi_mem_exec_op(nor->spimem, &op);
        }
index 8c79bad2a9a57dfd9f1841add3597590ab02e3db..4f2e6910c6232618ab536c83cdc83ba1a398b041 100644 (file)
@@ -952,7 +952,7 @@ static int alb_upper_dev_walk(struct net_device *upper, void *_data)
        struct bond_vlan_tag *tags;
 
        if (is_vlan_dev(upper) &&
-           bond->nest_level == vlan_get_encap_level(upper) - 1) {
+           bond->dev->lower_level == upper->lower_level - 1) {
                if (upper->addr_assign_type == NET_ADDR_STOLEN) {
                        alb_send_lp_vid(slave, mac_addr,
                                        vlan_dev_vlan_proto(upper),
index 931d9d9356869b400c1b653414164a3e5ecb0b51..480f9459b402c578ac1b09f8c48ae2a730852c3c 100644 (file)
@@ -1733,8 +1733,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
                goto err_upper_unlink;
        }
 
-       bond->nest_level = dev_get_nest_level(bond_dev) + 1;
-
        /* If the mode uses primary, then the following is handled by
         * bond_change_active_slave().
         */
@@ -1816,7 +1814,8 @@ err_detach:
        slave_disable_netpoll(new_slave);
 
 err_close:
-       slave_dev->priv_flags &= ~IFF_BONDING;
+       if (!netif_is_bond_master(slave_dev))
+               slave_dev->priv_flags &= ~IFF_BONDING;
        dev_close(slave_dev);
 
 err_restore_mac:
@@ -1956,9 +1955,6 @@ static int __bond_release_one(struct net_device *bond_dev,
        if (!bond_has_slaves(bond)) {
                bond_set_carrier(bond);
                eth_hw_addr_random(bond_dev);
-               bond->nest_level = SINGLE_DEPTH_NESTING;
-       } else {
-               bond->nest_level = dev_get_nest_level(bond_dev) + 1;
        }
 
        unblock_netpoll_tx();
@@ -2017,7 +2013,8 @@ static int __bond_release_one(struct net_device *bond_dev,
        else
                dev_set_mtu(slave_dev, slave->original_mtu);
 
-       slave_dev->priv_flags &= ~IFF_BONDING;
+       if (!netif_is_bond_master(slave_dev))
+               slave_dev->priv_flags &= ~IFF_BONDING;
 
        bond_free_slave(slave);
 
@@ -3442,13 +3439,6 @@ static void bond_fold_stats(struct rtnl_link_stats64 *_res,
        }
 }
 
-static int bond_get_nest_level(struct net_device *bond_dev)
-{
-       struct bonding *bond = netdev_priv(bond_dev);
-
-       return bond->nest_level;
-}
-
 static void bond_get_stats(struct net_device *bond_dev,
                           struct rtnl_link_stats64 *stats)
 {
@@ -3457,7 +3447,7 @@ static void bond_get_stats(struct net_device *bond_dev,
        struct list_head *iter;
        struct slave *slave;
 
-       spin_lock_nested(&bond->stats_lock, bond_get_nest_level(bond_dev));
+       spin_lock(&bond->stats_lock);
        memcpy(stats, &bond->bond_stats, sizeof(*stats));
 
        rcu_read_lock();
@@ -4039,7 +4029,7 @@ out:
                 * this to-be-skipped slave to send a packet out.
                 */
                old_arr = rtnl_dereference(bond->slave_arr);
-               for (idx = 0; idx < old_arr->count; idx++) {
+               for (idx = 0; old_arr != NULL && idx < old_arr->count; idx++) {
                        if (skipslave == old_arr->arr[idx]) {
                                old_arr->arr[idx] =
                                    old_arr->arr[old_arr->count-1];
@@ -4268,7 +4258,6 @@ static const struct net_device_ops bond_netdev_ops = {
        .ndo_neigh_setup        = bond_neigh_setup,
        .ndo_vlan_rx_add_vid    = bond_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid   = bond_vlan_rx_kill_vid,
-       .ndo_get_lock_subclass  = bond_get_nest_level,
 #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_netpoll_setup      = bond_netpoll_setup,
        .ndo_netpoll_cleanup    = bond_netpoll_cleanup,
@@ -4296,7 +4285,6 @@ void bond_setup(struct net_device *bond_dev)
        struct bonding *bond = netdev_priv(bond_dev);
 
        spin_lock_init(&bond->mode_lock);
-       spin_lock_init(&bond->stats_lock);
        bond->params = bonding_defaults;
 
        /* Initialize pointers */
@@ -4365,6 +4353,7 @@ static void bond_uninit(struct net_device *bond_dev)
 
        list_del(&bond->bond_list);
 
+       lockdep_unregister_key(&bond->stats_lock_key);
        bond_debug_unregister(bond);
 }
 
@@ -4768,8 +4757,9 @@ static int bond_init(struct net_device *bond_dev)
        if (!bond->wq)
                return -ENOMEM;
 
-       bond->nest_level = SINGLE_DEPTH_NESTING;
-       netdev_lockdep_set_classes(bond_dev);
+       spin_lock_init(&bond->stats_lock);
+       lockdep_register_key(&bond->stats_lock_key);
+       lockdep_set_class(&bond->stats_lock, &bond->stats_lock_key);
 
        list_add_tail(&bond->bond_list, &bn->dev_list);
 
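The bonding hunks above drop the manual nest_level bookkeeping (and spin_lock_nested()) in favour of a dedicated lockdep class per bond: bond_init() registers a dynamic lock_class_key for stats_lock and bond_uninit() unregisters it. A minimal sketch of that per-instance key pattern follows; my_dev, my_dev_init() and my_dev_uninit() are illustrative names, not bonding code.

#include <linux/spinlock.h>
#include <linux/lockdep.h>

struct my_dev {
	spinlock_t stats_lock;
	struct lock_class_key stats_lock_key;	/* one lockdep class per instance */
};

static void my_dev_init(struct my_dev *d)
{
	spin_lock_init(&d->stats_lock);
	lockdep_register_key(&d->stats_lock_key);
	lockdep_set_class(&d->stats_lock, &d->stats_lock_key);
}

static void my_dev_uninit(struct my_dev *d)
{
	/* unregister only once the lock can no longer be taken */
	lockdep_unregister_key(&d->stats_lock_key);
}
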
index 526ba2ab66f10b027ec5861d481be26a26e890af..cc3536315eff55c5bbc1942f09490fa44ca900e7 100644 (file)
@@ -1845,7 +1845,6 @@ int b53_mirror_add(struct dsa_switch *ds, int port,
                loc = B53_EG_MIR_CTL;
 
        b53_read16(dev, B53_MGMT_PAGE, loc, &reg);
-       reg &= ~MIRROR_MASK;
        reg |= BIT(port);
        b53_write16(dev, B53_MGMT_PAGE, loc, reg);
 
index 26509fa37a50b4d4137adfef9878ac969464da18..d44651ad520c10d7e21d5f7793baab248e389b7c 100644 (file)
@@ -37,22 +37,11 @@ static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
        unsigned int i;
        u32 reg, offset;
 
-       if (priv->type == BCM7445_DEVICE_ID)
-               offset = CORE_STS_OVERRIDE_IMP;
-       else
-               offset = CORE_STS_OVERRIDE_IMP2;
-
        /* Enable the port memories */
        reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
        reg &= ~P_TXQ_PSM_VDD(port);
        core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);
 
-       /* Enable Broadcast, Multicast, Unicast forwarding to IMP port */
-       reg = core_readl(priv, CORE_IMP_CTL);
-       reg |= (RX_BCST_EN | RX_MCST_EN | RX_UCST_EN);
-       reg &= ~(RX_DIS | TX_DIS);
-       core_writel(priv, reg, CORE_IMP_CTL);
-
        /* Enable forwarding */
        core_writel(priv, SW_FWDG_EN, CORE_SWMODE);
 
@@ -71,10 +60,27 @@ static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
 
        b53_brcm_hdr_setup(ds, port);
 
-       /* Force link status for IMP port */
-       reg = core_readl(priv, offset);
-       reg |= (MII_SW_OR | LINK_STS);
-       core_writel(priv, reg, offset);
+       if (port == 8) {
+               if (priv->type == BCM7445_DEVICE_ID)
+                       offset = CORE_STS_OVERRIDE_IMP;
+               else
+                       offset = CORE_STS_OVERRIDE_IMP2;
+
+               /* Force link status for IMP port */
+               reg = core_readl(priv, offset);
+               reg |= (MII_SW_OR | LINK_STS);
+               core_writel(priv, reg, offset);
+
+               /* Enable Broadcast, Multicast, Unicast forwarding to IMP port */
+               reg = core_readl(priv, CORE_IMP_CTL);
+               reg |= (RX_BCST_EN | RX_MCST_EN | RX_UCST_EN);
+               reg &= ~(RX_DIS | TX_DIS);
+               core_writel(priv, reg, CORE_IMP_CTL);
+       } else {
+               reg = core_readl(priv, CORE_G_PCTL_PORT(port));
+               reg &= ~(RX_DIS | TX_DIS);
+               core_writel(priv, reg, CORE_G_PCTL_PORT(port));
+       }
 }
 
 static void bcm_sf2_gphy_enable_set(struct dsa_switch *ds, bool enable)
index a23d3ffdf0c4d5c95d555229b8740e2f69500e4e..24a5e99f7fd5b5ee66252ca7b41963f24d090d81 100644 (file)
@@ -1224,10 +1224,6 @@ static int ksz8795_switch_init(struct ksz_device *dev)
 {
        int i;
 
-       mutex_init(&dev->stats_mutex);
-       mutex_init(&dev->alu_mutex);
-       mutex_init(&dev->vlan_mutex);
-
        dev->ds->ops = &ksz8795_switch_ops;
 
        for (i = 0; i < ARRAY_SIZE(ksz8795_switch_chips); i++) {
index d0f8153e86b7d7bddcde1cd30b305c4e6a69b5f5..8b00f8e6c02f4f2a2fbc545c026cb2daaa48528c 100644 (file)
@@ -25,6 +25,7 @@ KSZ_REGMAP_TABLE(ksz8795, 16, SPI_ADDR_SHIFT,
 
 static int ksz8795_spi_probe(struct spi_device *spi)
 {
+       struct regmap_config rc;
        struct ksz_device *dev;
        int i, ret;
 
@@ -33,9 +34,9 @@ static int ksz8795_spi_probe(struct spi_device *spi)
                return -ENOMEM;
 
        for (i = 0; i < ARRAY_SIZE(ksz8795_regmap_config); i++) {
-               dev->regmap[i] = devm_regmap_init_spi(spi,
-                                                     &ksz8795_regmap_config
-                                                     [i]);
+               rc = ksz8795_regmap_config[i];
+               rc.lock_arg = &dev->regmap_mutex;
+               dev->regmap[i] = devm_regmap_init_spi(spi, &rc);
                if (IS_ERR(dev->regmap[i])) {
                        ret = PTR_ERR(dev->regmap[i]);
                        dev_err(&spi->dev,
index 0b1e01f0873d5733336e3c291148f5993494f3c1..fdffd9e0c518b53d0f55e4bb1fe5975ddcebed58 100644 (file)
@@ -17,6 +17,7 @@ KSZ_REGMAP_TABLE(ksz9477, not_used, 16, 0, 0);
 static int ksz9477_i2c_probe(struct i2c_client *i2c,
                             const struct i2c_device_id *i2c_id)
 {
+       struct regmap_config rc;
        struct ksz_device *dev;
        int i, ret;
 
@@ -25,8 +26,9 @@ static int ksz9477_i2c_probe(struct i2c_client *i2c,
                return -ENOMEM;
 
        for (i = 0; i < ARRAY_SIZE(ksz9477_regmap_config); i++) {
-               dev->regmap[i] = devm_regmap_init_i2c(i2c,
-                                       &ksz9477_regmap_config[i]);
+               rc = ksz9477_regmap_config[i];
+               rc.lock_arg = &dev->regmap_mutex;
+               dev->regmap[i] = devm_regmap_init_i2c(i2c, &rc);
                if (IS_ERR(dev->regmap[i])) {
                        ret = PTR_ERR(dev->regmap[i]);
                        dev_err(&i2c->dev,
index 2938e892b6316b5f5d65ff6d0588e611b0cabf40..16939f29faa5ec7f13de863bae18d973012209f4 100644 (file)
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0
- *
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
  * Microchip KSZ9477 register definitions
  *
  * Copyright (C) 2017-2018 Microchip Technology Inc.
index f4198d6f72bee55a6ba4f0dca945f0d701870d82..c5f64959a184e25b25adc68f33543a6236b2f874 100644 (file)
@@ -24,6 +24,7 @@ KSZ_REGMAP_TABLE(ksz9477, 32, SPI_ADDR_SHIFT,
 
 static int ksz9477_spi_probe(struct spi_device *spi)
 {
+       struct regmap_config rc;
        struct ksz_device *dev;
        int i, ret;
 
@@ -32,8 +33,9 @@ static int ksz9477_spi_probe(struct spi_device *spi)
                return -ENOMEM;
 
        for (i = 0; i < ARRAY_SIZE(ksz9477_regmap_config); i++) {
-               dev->regmap[i] = devm_regmap_init_spi(spi,
-                                       &ksz9477_regmap_config[i]);
+               rc = ksz9477_regmap_config[i];
+               rc.lock_arg = &dev->regmap_mutex;
+               dev->regmap[i] = devm_regmap_init_spi(spi, &rc);
                if (IS_ERR(dev->regmap[i])) {
                        ret = PTR_ERR(dev->regmap[i]);
                        dev_err(&spi->dev,
index b0b870f0c252095f7844230b3e3fe700e14d75fe..fe47180c908b979fe9df3d40ee6729ca9d78828f 100644 (file)
@@ -436,7 +436,7 @@ int ksz_switch_register(struct ksz_device *dev,
        }
 
        mutex_init(&dev->dev_mutex);
-       mutex_init(&dev->stats_mutex);
+       mutex_init(&dev->regmap_mutex);
        mutex_init(&dev->alu_mutex);
        mutex_init(&dev->vlan_mutex);
 
index dd60d0837fc6db51f744db06fb26f1eeefb83673..a20ebb749377c1356506aabb1ded804d916d4a0e 100644 (file)
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0
- * Microchip switch driver common header
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Microchip switch driver common header
  *
  * Copyright (C) 2017-2019 Microchip Technology Inc.
  */
@@ -47,7 +47,7 @@ struct ksz_device {
        const char *name;
 
        struct mutex dev_mutex;         /* device access */
-       struct mutex stats_mutex;       /* status access */
+       struct mutex regmap_mutex;      /* regmap access */
        struct mutex alu_mutex;         /* ALU access */
        struct mutex vlan_mutex;        /* vlan access */
        const struct ksz_dev_ops *dev_ops;
@@ -290,6 +290,18 @@ static inline void ksz_pwrite32(struct ksz_device *dev, int port, int offset,
        ksz_write32(dev, dev->dev_ops->get_port_addr(port, offset), data);
 }
 
+static inline void ksz_regmap_lock(void *__mtx)
+{
+       struct mutex *mtx = __mtx;
+       mutex_lock(mtx);
+}
+
+static inline void ksz_regmap_unlock(void *__mtx)
+{
+       struct mutex *mtx = __mtx;
+       mutex_unlock(mtx);
+}
+
 /* Regmap tables generation */
 #define KSZ_SPI_OP_RD          3
 #define KSZ_SPI_OP_WR          2
@@ -314,6 +326,8 @@ static inline void ksz_pwrite32(struct ksz_device *dev, int port, int offset,
                .write_flag_mask =                                      \
                        KSZ_SPI_OP_FLAG_MASK(KSZ_SPI_OP_WR, swp,        \
                                             regbits, regpad),          \
+               .lock = ksz_regmap_lock,                                \
+               .unlock = ksz_regmap_unlock,                            \
                .reg_format_endian = REGMAP_ENDIAN_BIG,                 \
                .val_format_endian = REGMAP_ENDIAN_BIG                  \
        }
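The ksz_regmap_lock()/ksz_regmap_unlock() helpers and the .lock/.unlock entries added to KSZ_REGMAP_TABLE make every regmap built from the table serialize on the mutex passed through .lock_arg (the SPI/I2C probe hunks above copy the static config and point lock_arg at dev->regmap_mutex). A hedged sketch of the same regmap_config wiring, with my_* names standing in for the driver specifics:

#include <linux/mutex.h>
#include <linux/regmap.h>

static void my_regmap_lock(void *ctx)
{
	mutex_lock((struct mutex *)ctx);
}

static void my_regmap_unlock(void *ctx)
{
	mutex_unlock((struct mutex *)ctx);
}

static const struct regmap_config my_cfg_template = {
	.reg_bits = 16,
	.val_bits = 8,
	.lock = my_regmap_lock,
	.unlock = my_regmap_unlock,
	/* .lock_arg is filled in at probe time with the shared mutex */
};
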
index 684aa51684dbd09cfa209afb3dbbb4b074087b60..b00274caae4fb0f6557f8e50b2d6432d38abc7ae 100644 (file)
@@ -705,7 +705,7 @@ qca8k_setup(struct dsa_switch *ds)
                    BIT(0) << QCA8K_GLOBAL_FW_CTRL1_UC_DP_S);
 
        /* Setup connection between CPU port & user ports */
-       for (i = 0; i < DSA_MAX_PORTS; i++) {
+       for (i = 0; i < QCA8K_NUM_PORTS; i++) {
                /* CPU port gets connected to all user ports of the switch */
                if (dsa_is_cpu_port(ds, i)) {
                        qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(QCA8K_CPU_PORT),
@@ -1077,7 +1077,7 @@ qca8k_sw_probe(struct mdio_device *mdiodev)
        if (id != QCA8K_ID_QCA8337)
                return -ENODEV;
 
-       priv->ds = dsa_switch_alloc(&mdiodev->dev, DSA_MAX_PORTS);
+       priv->ds = dsa_switch_alloc(&mdiodev->dev, QCA8K_NUM_PORTS);
        if (!priv->ds)
                return -ENOMEM;
 
index ca3d17e43ed8be057f3628b3548a58df11656c88..ac88caca5ad4defbc2b0311562fdaebbba1bf8f0 100644 (file)
@@ -339,10 +339,12 @@ int rtl8366_vlan_prepare(struct dsa_switch *ds, int port,
                         const struct switchdev_obj_port_vlan *vlan)
 {
        struct realtek_smi *smi = ds->priv;
+       u16 vid;
        int ret;
 
-       if (!smi->ops->is_vlan_valid(smi, port))
-               return -EINVAL;
+       for (vid = vlan->vid_begin; vid < vlan->vid_end; vid++)
+               if (!smi->ops->is_vlan_valid(smi, vid))
+                       return -EINVAL;
 
        dev_info(smi->dev, "prepare VLANs %04x..%04x\n",
                 vlan->vid_begin, vlan->vid_end);
@@ -370,8 +372,9 @@ void rtl8366_vlan_add(struct dsa_switch *ds, int port,
        u16 vid;
        int ret;
 
-       if (!smi->ops->is_vlan_valid(smi, port))
-               return;
+       for (vid = vlan->vid_begin; vid < vlan->vid_end; vid++)
+               if (!smi->ops->is_vlan_valid(smi, vid))
+                       return;
 
        dev_info(smi->dev, "add VLAN on port %d, %s, %s\n",
                 port,
index a268085ffad28cfdc8c6668c4e42595d03b0e363..f5cc8b0a7c74cfdfa4fe0b0f4e171f457de3f006 100644 (file)
@@ -507,7 +507,8 @@ static int rtl8366rb_setup_cascaded_irq(struct realtek_smi *smi)
        irq = of_irq_get(intc, 0);
        if (irq <= 0) {
                dev_err(smi->dev, "failed to get parent IRQ\n");
-               return irq ? irq : -EINVAL;
+               ret = irq ? irq : -EINVAL;
+               goto out_put_node;
        }
 
        /* This clears the IRQ status register */
@@ -515,7 +516,7 @@ static int rtl8366rb_setup_cascaded_irq(struct realtek_smi *smi)
                          &val);
        if (ret) {
                dev_err(smi->dev, "can't read interrupt status\n");
-               return ret;
+               goto out_put_node;
        }
 
        /* Fetch IRQ edge information from the descriptor */
@@ -537,7 +538,7 @@ static int rtl8366rb_setup_cascaded_irq(struct realtek_smi *smi)
                                 val);
        if (ret) {
                dev_err(smi->dev, "could not configure IRQ polarity\n");
-               return ret;
+               goto out_put_node;
        }
 
        ret = devm_request_threaded_irq(smi->dev, irq, NULL,
@@ -545,7 +546,7 @@ static int rtl8366rb_setup_cascaded_irq(struct realtek_smi *smi)
                                        "RTL8366RB", smi);
        if (ret) {
                dev_err(smi->dev, "unable to request irq: %d\n", ret);
-               return ret;
+               goto out_put_node;
        }
        smi->irqdomain = irq_domain_add_linear(intc,
                                               RTL8366RB_NUM_INTERRUPT,
@@ -553,12 +554,15 @@ static int rtl8366rb_setup_cascaded_irq(struct realtek_smi *smi)
                                               smi);
        if (!smi->irqdomain) {
                dev_err(smi->dev, "failed to create IRQ domain\n");
-               return -EINVAL;
+               ret = -EINVAL;
+               goto out_put_node;
        }
        for (i = 0; i < smi->num_ports; i++)
                irq_set_parent(irq_create_mapping(smi->irqdomain, i), irq);
 
-       return 0;
+out_put_node:
+       of_node_put(intc);
+       return ret;
 }
 
 static int rtl8366rb_set_addr(struct realtek_smi *smi)
index f40b248f0b23a5581ffa3b3eefd69e4369f3c79b..ffac0ea4e8d56e61cb17858c3f9e1aac96b08885 100644 (file)
@@ -26,8 +26,8 @@ config NET_DSA_SJA1105_PTP
 
 config NET_DSA_SJA1105_TAS
        bool "Support for the Time-Aware Scheduler on NXP SJA1105"
-       depends on NET_DSA_SJA1105
-       depends on NET_SCH_TAPRIO
+       depends on NET_DSA_SJA1105 && NET_SCH_TAPRIO
+       depends on NET_SCH_TAPRIO=y || NET_DSA_SJA1105=m
        help
          This enables support for the TTEthernet-based egress scheduling
          engine in the SJA1105 DSA driver, which is controlled using a
index e53e494c22e09825c3a8bfcedb24d936de8474e9..fbb564c3beb8db1d6c6aff983cfa6363114bf42a 100644 (file)
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0
- * Copyright (c) 2018, Sensor-Technik Wiedemann GmbH
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018, Sensor-Technik Wiedemann GmbH
  * Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
  */
 #ifndef _SJA1105_H
index 740dadf43f0197338e7e5f8387e558357cc6509e..1fc0d13dc623faca9a9213114387e4b9d6f98725 100644 (file)
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0
- * Copyright (c) 2019, Vladimir Oltean <olteanv@gmail.com>
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2019, Vladimir Oltean <olteanv@gmail.com>
  */
 #ifndef _SJA1105_DYNAMIC_CONFIG_H
 #define _SJA1105_DYNAMIC_CONFIG_H
index b9def744bcb3b92d7edf390efc72ca7e465be4eb..7687ddcae1598eafaa4d7815c1a432618e048881 100644 (file)
@@ -1897,7 +1897,9 @@ static int sja1105_set_ageing_time(struct dsa_switch *ds,
        return sja1105_static_config_reload(priv);
 }
 
-/* Caller must hold priv->tagger_data.meta_lock */
+/* Must be called only with priv->tagger_data.state bit
+ * SJA1105_HWTS_RX_EN cleared
+ */
 static int sja1105_change_rxtstamping(struct sja1105_private *priv,
                                      bool on)
 {
@@ -1954,16 +1956,17 @@ static int sja1105_hwtstamp_set(struct dsa_switch *ds, int port,
                break;
        }
 
-       if (rx_on != priv->tagger_data.hwts_rx_en) {
-               spin_lock(&priv->tagger_data.meta_lock);
+       if (rx_on != test_bit(SJA1105_HWTS_RX_EN, &priv->tagger_data.state)) {
+               clear_bit(SJA1105_HWTS_RX_EN, &priv->tagger_data.state);
+
                rc = sja1105_change_rxtstamping(priv, rx_on);
-               spin_unlock(&priv->tagger_data.meta_lock);
                if (rc < 0) {
                        dev_err(ds->dev,
                                "Failed to change RX timestamping: %d\n", rc);
-                       return -EFAULT;
+                       return rc;
                }
-               priv->tagger_data.hwts_rx_en = rx_on;
+               if (rx_on)
+                       set_bit(SJA1105_HWTS_RX_EN, &priv->tagger_data.state);
        }
 
        if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
@@ -1982,7 +1985,7 @@ static int sja1105_hwtstamp_get(struct dsa_switch *ds, int port,
                config.tx_type = HWTSTAMP_TX_ON;
        else
                config.tx_type = HWTSTAMP_TX_OFF;
-       if (priv->tagger_data.hwts_rx_en)
+       if (test_bit(SJA1105_HWTS_RX_EN, &priv->tagger_data.state))
                config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
        else
                config.rx_filter = HWTSTAMP_FILTER_NONE;
@@ -2005,12 +2008,12 @@ static void sja1105_rxtstamp_work(struct work_struct *work)
 
        mutex_lock(&priv->ptp_lock);
 
-       now = priv->tstamp_cc.read(&priv->tstamp_cc);
-
        while ((skb = skb_dequeue(&data->skb_rxtstamp_queue)) != NULL) {
                struct skb_shared_hwtstamps *shwt = skb_hwtstamps(skb);
                u64 ts;
 
+               now = priv->tstamp_cc.read(&priv->tstamp_cc);
+
                *shwt = (struct skb_shared_hwtstamps) {0};
 
                ts = SJA1105_SKB_CB(skb)->meta_tstamp;
@@ -2031,7 +2034,7 @@ static bool sja1105_port_rxtstamp(struct dsa_switch *ds, int port,
        struct sja1105_private *priv = ds->priv;
        struct sja1105_tagger_data *data = &priv->tagger_data;
 
-       if (!data->hwts_rx_en)
+       if (!test_bit(SJA1105_HWTS_RX_EN, &data->state))
                return false;
 
        /* We need to read the full PTP clock to reconstruct the Rx
@@ -2201,6 +2204,7 @@ static int sja1105_probe(struct spi_device *spi)
        tagger_data = &priv->tagger_data;
        skb_queue_head_init(&tagger_data->skb_rxtstamp_queue);
        INIT_WORK(&tagger_data->rxtstamp_work, sja1105_rxtstamp_work);
+       spin_lock_init(&tagger_data->meta_lock);
 
        /* Connections between dsa_port and sja1105_port */
        for (i = 0; i < SJA1105_NUM_PORTS; i++) {
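The sja1105 hunks above swap the spinlock-protected hwts_rx_en flag for an atomic SJA1105_HWTS_RX_EN bit in tagger_data.state: the bit is cleared before RX timestamping is reconfigured and set again only when the change succeeds, while the receive path simply test_bit()s it. A small sketch of that flag discipline, with MY_FLAG_RX_EN and struct my_tagger as illustrative names:

#include <linux/bitops.h>
#include <linux/types.h>

#define MY_FLAG_RX_EN	0

struct my_tagger {
	unsigned long state;
};

static int my_change_rxtstamp(struct my_tagger *t, bool on)
{
	/* the hot path must see the feature as off while we reconfigure */
	clear_bit(MY_FLAG_RX_EN, &t->state);

	/* ... reprogram the hardware here, returning an error on failure ... */

	if (on)
		set_bit(MY_FLAG_RX_EN, &t->state);
	return 0;
}

static bool my_rx_tstamp_enabled(struct my_tagger *t)
{
	return test_bit(MY_FLAG_RX_EN, &t->state);
}
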
index af456b0a4d27696746586b5fdd16654d17c2323e..394e12a6ad59ba22ae556561e75b03c014b95117 100644 (file)
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0
- * Copyright (c) 2019, Vladimir Oltean <olteanv@gmail.com>
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2019, Vladimir Oltean <olteanv@gmail.com>
  */
 #ifndef _SJA1105_PTP_H
 #define _SJA1105_PTP_H
index 84dc603138cf33a102ab64990e220d76e39c246e..58dd37ecde174b2c9441b7b3ae6b51e080326603 100644 (file)
@@ -409,7 +409,8 @@ int sja1105_static_config_upload(struct sja1105_private *priv)
        rc = static_config_buf_prepare_for_upload(priv, config_buf, buf_len);
        if (rc < 0) {
                dev_err(dev, "Invalid config, cannot upload\n");
-               return -EINVAL;
+               rc = -EINVAL;
+               goto out;
        }
        /* Prevent PHY jabbering during switch reset by inhibiting
         * Tx on all ports and waiting for current packet to drain.
@@ -418,7 +419,8 @@ int sja1105_static_config_upload(struct sja1105_private *priv)
        rc = sja1105_inhibit_tx(priv, port_bitmap, true);
        if (rc < 0) {
                dev_err(dev, "Failed to inhibit Tx on ports\n");
-               return -ENXIO;
+               rc = -ENXIO;
+               goto out;
        }
        /* Wait for an eventual egress packet to finish transmission
         * (reach IFG). It is guaranteed that a second one will not
index 7f87022a2d61a82a77721b74613a931aa7f972f1..f4a5c5c043116748a07deeb1fbd93b5c8f7c3f77 100644 (file)
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright (c) 2016-2018, NXP Semiconductors
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2016-2018, NXP Semiconductors
  * Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
  */
 #ifndef _SJA1105_STATIC_CONFIG_H
index 0b803c30e64073f716abb0ca07ece61cb2fe1a45..0aad212d88b2e07912da1ef048146ff2f19e747e 100644 (file)
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0
- * Copyright (c) 2019, Vladimir Oltean <olteanv@gmail.com>
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2019, Vladimir Oltean <olteanv@gmail.com>
  */
 #ifndef _SJA1105_TAS_H
 #define _SJA1105_TAS_H
index b4a0fb281e69ea2631db1e6e45373e0d5a556a27..bb65dd39f847404d64497835a9b03e20d6e2bbde 100644 (file)
@@ -194,9 +194,7 @@ static void aq_ndev_set_multicast_settings(struct net_device *ndev)
 {
        struct aq_nic_s *aq_nic = netdev_priv(ndev);
 
-       aq_nic_set_packet_filter(aq_nic, ndev->flags);
-
-       aq_nic_set_multicast_list(aq_nic, ndev);
+       (void)aq_nic_set_multicast_list(aq_nic, ndev);
 }
 
 static int aq_ndo_vlan_rx_add_vid(struct net_device *ndev, __be16 proto,
index 8f66e781781182e553c53ba73fde92581de21ee7..137c1de4c6ec04b3b416ad07013b9e0cf2bc4da3 100644 (file)
@@ -631,9 +631,12 @@ err_exit:
 
 int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev)
 {
-       unsigned int packet_filter = self->packet_filter;
+       const struct aq_hw_ops *hw_ops = self->aq_hw_ops;
+       struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
+       unsigned int packet_filter = ndev->flags;
        struct netdev_hw_addr *ha = NULL;
        unsigned int i = 0U;
+       int err = 0;
 
        self->mc_list.count = 0;
        if (netdev_uc_count(ndev) > AQ_HW_MULTICAST_ADDRESS_MAX) {
@@ -641,29 +644,28 @@ int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev)
        } else {
                netdev_for_each_uc_addr(ha, ndev) {
                        ether_addr_copy(self->mc_list.ar[i++], ha->addr);
-
-                       if (i >= AQ_HW_MULTICAST_ADDRESS_MAX)
-                               break;
                }
        }
 
-       if (i + netdev_mc_count(ndev) > AQ_HW_MULTICAST_ADDRESS_MAX) {
-               packet_filter |= IFF_ALLMULTI;
-       } else {
-               netdev_for_each_mc_addr(ha, ndev) {
-                       ether_addr_copy(self->mc_list.ar[i++], ha->addr);
-
-                       if (i >= AQ_HW_MULTICAST_ADDRESS_MAX)
-                               break;
+       cfg->is_mc_list_enabled = !!(packet_filter & IFF_MULTICAST);
+       if (cfg->is_mc_list_enabled) {
+               if (i + netdev_mc_count(ndev) > AQ_HW_MULTICAST_ADDRESS_MAX) {
+                       packet_filter |= IFF_ALLMULTI;
+               } else {
+                       netdev_for_each_mc_addr(ha, ndev) {
+                               ether_addr_copy(self->mc_list.ar[i++],
+                                               ha->addr);
+                       }
                }
        }
 
        if (i > 0 && i <= AQ_HW_MULTICAST_ADDRESS_MAX) {
-               packet_filter |= IFF_MULTICAST;
                self->mc_list.count = i;
-               self->aq_hw_ops->hw_multicast_list_set(self->aq_hw,
-                                                      self->mc_list.ar,
-                                                      self->mc_list.count);
+               err = hw_ops->hw_multicast_list_set(self->aq_hw,
+                                                   self->mc_list.ar,
+                                                   self->mc_list.count);
+               if (err < 0)
+                       return err;
        }
        return aq_nic_set_packet_filter(self, packet_filter);
 }
index 3901d7994ca1571500253855c8d613706eacfa80..76bdbe1596d62f10ce3e60ed0de1f1e2175dc435 100644 (file)
@@ -313,6 +313,7 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
                                        break;
 
                                buff->is_error |= buff_->is_error;
+                               buff->is_cso_err |= buff_->is_cso_err;
 
                        } while (!buff_->is_eop);
 
@@ -320,7 +321,7 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
                                err = 0;
                                goto err_exit;
                        }
-                       if (buff->is_error) {
+                       if (buff->is_error || buff->is_cso_err) {
                                buff_ = buff;
                                do {
                                        next_ = buff_->next,
index 30f7fc4c97ff45c979c80155237da8b365628251..2ad3fa6316ce3271ecb7be8081fdcf3e505e98d0 100644 (file)
@@ -818,14 +818,15 @@ static int hw_atl_b0_hw_packet_filter_set(struct aq_hw_s *self,
                                     cfg->is_vlan_force_promisc);
 
        hw_atl_rpfl2multicast_flr_en_set(self,
-                                        IS_FILTER_ENABLED(IFF_ALLMULTI), 0);
+                                        IS_FILTER_ENABLED(IFF_ALLMULTI) &&
+                                        IS_FILTER_ENABLED(IFF_MULTICAST), 0);
 
        hw_atl_rpfl2_accept_all_mc_packets_set(self,
-                                              IS_FILTER_ENABLED(IFF_ALLMULTI));
+                                             IS_FILTER_ENABLED(IFF_ALLMULTI) &&
+                                             IS_FILTER_ENABLED(IFF_MULTICAST));
 
        hw_atl_rpfl2broadcast_en_set(self, IS_FILTER_ENABLED(IFF_BROADCAST));
 
-       cfg->is_mc_list_enabled = IS_FILTER_ENABLED(IFF_MULTICAST);
 
        for (i = HW_ATL_B0_MAC_MIN; i < HW_ATL_B0_MAC_MAX; ++i)
                hw_atl_rpfl2_uc_flr_en_set(self,
@@ -968,14 +969,26 @@ static int hw_atl_b0_hw_interrupt_moderation_set(struct aq_hw_s *self)
 
 static int hw_atl_b0_hw_stop(struct aq_hw_s *self)
 {
+       int err;
+       u32 val;
+
        hw_atl_b0_hw_irq_disable(self, HW_ATL_B0_INT_MASK);
 
        /* Invalidate Descriptor Cache to prevent writing to the cached
         * descriptors and to the data pointer of those descriptors
         */
-       hw_atl_rdm_rx_dma_desc_cache_init_set(self, 1);
+       hw_atl_rdm_rx_dma_desc_cache_init_tgl(self);
 
-       return aq_hw_err_from_flags(self);
+       err = aq_hw_err_from_flags(self);
+
+       if (err)
+               goto err_exit;
+
+       readx_poll_timeout_atomic(hw_atl_rdm_rx_dma_desc_cache_init_done_get,
+                                 self, val, val == 1, 1000U, 10000U);
+
+err_exit:
+       return err;
 }
 
 static int hw_atl_b0_hw_ring_tx_stop(struct aq_hw_s *self,
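hw_atl_b0_hw_stop() above now toggles the descriptor-cache init bit and then polls the new "init done" bit with readx_poll_timeout_atomic(), re-reading every 1000 us for up to 10000 us. A hedged sketch of that polling helper; my_hw and my_read_init_done() are illustrative stand-ins, and the 0x5a10 offset is taken from the register definitions added a little further down.

#include <linux/io.h>
#include <linux/iopoll.h>

struct my_hw {
	void __iomem *regs;
};

static u32 my_read_init_done(struct my_hw *hw)
{
	/* bit 0 of register 0x5a10: rdm_desc_init_done_i */
	return readl(hw->regs + 0x5a10) & 0x1;
}

static int my_wait_init_done(struct my_hw *hw)
{
	u32 val;

	/* poll every 1000 us; returns -ETIMEDOUT after 10000 us */
	return readx_poll_timeout_atomic(my_read_init_done, hw, val,
					 val == 1, 1000U, 10000U);
}
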
index 1149812ae463415298c959b0b1458b636e1d13ab..6f340695e6bd2383b316a440e3e9f1e8e20ca1a2 100644 (file)
@@ -606,12 +606,25 @@ void hw_atl_rpb_rx_flow_ctl_mode_set(struct aq_hw_s *aq_hw, u32 rx_flow_ctl_mode
                            HW_ATL_RPB_RX_FC_MODE_SHIFT, rx_flow_ctl_mode);
 }
 
-void hw_atl_rdm_rx_dma_desc_cache_init_set(struct aq_hw_s *aq_hw, u32 init)
+void hw_atl_rdm_rx_dma_desc_cache_init_tgl(struct aq_hw_s *aq_hw)
 {
+       u32 val;
+
+       val = aq_hw_read_reg_bit(aq_hw, HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_ADR,
+                                HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_MSK,
+                                HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_SHIFT);
+
        aq_hw_write_reg_bit(aq_hw, HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_ADR,
                            HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_MSK,
                            HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_SHIFT,
-                           init);
+                           val ^ 1);
+}
+
+u32 hw_atl_rdm_rx_dma_desc_cache_init_done_get(struct aq_hw_s *aq_hw)
+{
+       return aq_hw_read_reg_bit(aq_hw, RDM_RX_DMA_DESC_CACHE_INIT_DONE_ADR,
+                                 RDM_RX_DMA_DESC_CACHE_INIT_DONE_MSK,
+                                 RDM_RX_DMA_DESC_CACHE_INIT_DONE_SHIFT);
 }
 
 void hw_atl_rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
index 0c37abbabca54197caec421422c2e60ce3303f5e..c3ee278c3747c3635ab4035affe96b0addb6dc3b 100644 (file)
@@ -313,8 +313,11 @@ void hw_atl_rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
                                            u32 rx_pkt_buff_size_per_tc,
                                            u32 buffer);
 
-/* set rdm rx dma descriptor cache init */
-void hw_atl_rdm_rx_dma_desc_cache_init_set(struct aq_hw_s *aq_hw, u32 init);
+/* toggle rdm rx dma descriptor cache init */
+void hw_atl_rdm_rx_dma_desc_cache_init_tgl(struct aq_hw_s *aq_hw);
+
+/* get rdm rx dma descriptor cache init done */
+u32 hw_atl_rdm_rx_dma_desc_cache_init_done_get(struct aq_hw_s *aq_hw);
 
 /* set rx xoff enable (per tc) */
 void hw_atl_rpb_rx_xoff_en_per_tc_set(struct aq_hw_s *aq_hw, u32 rx_xoff_en_per_tc,
index c3febcdfa92e519405e78ec360e18ff59017e00b..35887ad89025aa992ec16dc149e5ff0c12abd888 100644 (file)
 /* default value of bitfield rdm_desc_init_i */
 #define HW_ATL_RDM_RX_DMA_DESC_CACHE_INIT_DEFAULT 0x0
 
+/* rdm_desc_init_done_i bitfield definitions
+ * preprocessor definitions for the bitfield rdm_desc_init_done_i.
+ * port="pif_rdm_desc_init_done_i"
+ */
+
+/* register address for bitfield rdm_desc_init_done_i */
+#define RDM_RX_DMA_DESC_CACHE_INIT_DONE_ADR 0x00005a10
+/* bitmask for bitfield rdm_desc_init_done_i */
+#define RDM_RX_DMA_DESC_CACHE_INIT_DONE_MSK 0x00000001U
+/* inverted bitmask for bitfield rdm_desc_init_done_i */
+#define RDM_RX_DMA_DESC_CACHE_INIT_DONE_MSKN 0xfffffffe
+/* lower bit position of bitfield  rdm_desc_init_done_i */
+#define RDM_RX_DMA_DESC_CACHE_INIT_DONE_SHIFT 0U
+/* width of bitfield rdm_desc_init_done_i */
+#define RDM_RX_DMA_DESC_CACHE_INIT_DONE_WIDTH 1
+/* default value of bitfield rdm_desc_init_done_i */
+#define RDM_RX_DMA_DESC_CACHE_INIT_DONE_DEFAULT 0x0
+
+
 /* rx int_desc_wrb_en bitfield definitions
  * preprocessor definitions for the bitfield "int_desc_wrb_en".
  * port="pif_rdm_int_desc_wrb_en_i"
index da726489e3c8debbe6c93fdb20d9006314c63a39..7bc51f8d6f2fcd0c0016e1cc6c3f8c04247455b7 100644 (file)
@@ -337,7 +337,7 @@ static int aq_fw2x_get_phy_temp(struct aq_hw_s *self, int *temp)
        /* Convert PHY temperature from 1/256 degree Celsius
         * to 1/1000 degree Celsius.
         */
-       *temp = temp_res  * 1000 / 256;
+       *temp = (temp_res & 0xFFFF) * 1000 / 256;
 
        return 0;
 }
index 42d2e1b02c440b98971ef1f8798f84ac94f6eadf..664d664e09250ef94167485fe52914005de99e4d 100644 (file)
@@ -256,6 +256,9 @@ static int emac_rockchip_remove(struct platform_device *pdev)
        if (priv->regulator)
                regulator_disable(priv->regulator);
 
+       if (priv->soc_data->need_div_macclk)
+               clk_disable_unprepare(priv->macclk);
+
        free_netdev(ndev);
        return err;
 }
index 7548247455d70bcd92ac8eedf6f08b29a3f08c8b..1b1a09095c0dcfc3a40d127d306349e4ec690b26 100644 (file)
@@ -526,7 +526,7 @@ static int ag71xx_mdio_probe(struct ag71xx *ag)
        struct device *dev = &ag->pdev->dev;
        struct net_device *ndev = ag->ndev;
        static struct mii_bus *mii_bus;
-       struct device_node *np;
+       struct device_node *np, *mnp;
        int err;
 
        np = dev->of_node;
@@ -571,7 +571,9 @@ static int ag71xx_mdio_probe(struct ag71xx *ag)
                msleep(200);
        }
 
-       err = of_mdiobus_register(mii_bus, np);
+       mnp = of_get_child_by_name(np, "mdio");
+       err = of_mdiobus_register(mii_bus, mnp);
+       of_node_put(mnp);
        if (err)
                goto mdio_err_put_clk;
 
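The ag71xx change above registers the MII bus against a dedicated "mdio" child node instead of the MAC node itself, and drops the node reference once registration is done. A minimal sketch of that lookup/put pattern; my_register_mdio() is an illustrative wrapper, while the of_* and of_mdiobus_register() calls are the kernel APIs used in the hunk.

#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>

static int my_register_mdio(struct mii_bus *bus, struct device_node *np)
{
	struct device_node *mnp;
	int err;

	mnp = of_get_child_by_name(np, "mdio");	/* may be NULL */
	err = of_mdiobus_register(bus, mnp);
	of_node_put(mnp);			/* of_node_put(NULL) is safe */
	return err;
}
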
index e24f5d2b6afe35477940a4a6b86c55795f855a07..53055ce5dfd61853a2be432a150f9dfd9b3af84a 100644 (file)
@@ -8,7 +8,6 @@ config NET_VENDOR_BROADCOM
        default y
        depends on (SSB_POSSIBLE && HAS_DMA) || PCI || BCM63XX || \
                   SIBYTE_SB1xxx_SOC
-       select DIMLIB
        ---help---
          If you have a network (Ethernet) chipset belonging to this class,
          say Y.
@@ -69,6 +68,7 @@ config BCMGENET
        select FIXED_PHY
        select BCM7XXX_PHY
        select MDIO_BCM_UNIMAC
+       select DIMLIB
        help
          This driver supports the built-in Ethernet MACs found in the
          Broadcom BCM7xxx Set Top Box family chipset.
@@ -188,6 +188,7 @@ config SYSTEMPORT
        select MII
        select PHYLIB
        select FIXED_PHY
+       select DIMLIB
        help
          This driver supports the built-in Ethernet MACs found in the
          Broadcom BCM7xxx Set Top Box family chipset using an internal
@@ -200,6 +201,7 @@ config BNXT
        select LIBCRC32C
        select NET_DEVLINK
        select PAGE_POOL
+       select DIMLIB
        ---help---
          This driver supports Broadcom NetXtreme-C/E 10/25/40/50 gigabit
          Ethernet cards.  To compile this driver as a module, choose M here:
index b4a8cf620a0c73d9b0d5e0eb0578ff1ca6d0988f..04ec909e06dfd72ec197871f8aff2701a3fc64bb 100644 (file)
@@ -10382,7 +10382,8 @@ static void bnxt_cleanup_pci(struct bnxt *bp)
 {
        bnxt_unmap_bars(bp, bp->pdev);
        pci_release_regions(bp->pdev);
-       pci_disable_device(bp->pdev);
+       if (pci_is_enabled(bp->pdev))
+               pci_disable_device(bp->pdev);
 }
 
 static void bnxt_init_dflt_coal(struct bnxt *bp)
@@ -10669,14 +10670,11 @@ static void bnxt_fw_reset_task(struct work_struct *work)
                bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
        }
        /* fall through */
-       case BNXT_FW_RESET_STATE_RESET_FW: {
-               u32 wait_dsecs = bp->fw_health->post_reset_wait_dsecs;
-
+       case BNXT_FW_RESET_STATE_RESET_FW:
                bnxt_reset_all(bp);
                bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
-               bnxt_queue_fw_reset_work(bp, wait_dsecs * HZ / 10);
+               bnxt_queue_fw_reset_work(bp, bp->fw_reset_min_dsecs * HZ / 10);
                return;
-       }
        case BNXT_FW_RESET_STATE_ENABLE_DEV:
                if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) &&
                    bp->fw_health) {
index e664392dccc0e3580bdb44943b4663894f671562..7151244f8c7d2f24f4fb5ad347aadabc799cee2e 100644 (file)
@@ -29,25 +29,20 @@ static int bnxt_fw_reporter_diagnose(struct devlink_health_reporter *reporter,
        val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
        health_status = val & 0xffff;
 
-       if (health_status == BNXT_FW_STATUS_HEALTHY) {
-               rc = devlink_fmsg_string_pair_put(fmsg, "FW status",
-                                                 "Healthy;");
-               if (rc)
-                       return rc;
-       } else if (health_status < BNXT_FW_STATUS_HEALTHY) {
-               rc = devlink_fmsg_string_pair_put(fmsg, "FW status",
-                                                 "Not yet completed initialization;");
+       if (health_status < BNXT_FW_STATUS_HEALTHY) {
+               rc = devlink_fmsg_string_pair_put(fmsg, "Description",
+                                                 "Not yet completed initialization");
                if (rc)
                        return rc;
        } else if (health_status > BNXT_FW_STATUS_HEALTHY) {
-               rc = devlink_fmsg_string_pair_put(fmsg, "FW status",
-                                                 "Encountered fatal error and cannot recover;");
+               rc = devlink_fmsg_string_pair_put(fmsg, "Description",
+                                                 "Encountered fatal error and cannot recover");
                if (rc)
                        return rc;
        }
 
        if (val >> 16) {
-               rc = devlink_fmsg_u32_pair_put(fmsg, "Error", val >> 16);
+               rc = devlink_fmsg_u32_pair_put(fmsg, "Error code", val >> 16);
                if (rc)
                        return rc;
        }
@@ -215,25 +210,68 @@ enum bnxt_dl_param_id {
 
 static const struct bnxt_dl_nvm_param nvm_params[] = {
        {DEVLINK_PARAM_GENERIC_ID_ENABLE_SRIOV, NVM_OFF_ENABLE_SRIOV,
-        BNXT_NVM_SHARED_CFG, 1},
+        BNXT_NVM_SHARED_CFG, 1, 1},
        {DEVLINK_PARAM_GENERIC_ID_IGNORE_ARI, NVM_OFF_IGNORE_ARI,
-        BNXT_NVM_SHARED_CFG, 1},
+        BNXT_NVM_SHARED_CFG, 1, 1},
        {DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MAX,
-        NVM_OFF_MSIX_VEC_PER_PF_MAX, BNXT_NVM_SHARED_CFG, 10},
+        NVM_OFF_MSIX_VEC_PER_PF_MAX, BNXT_NVM_SHARED_CFG, 10, 4},
        {DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MIN,
-        NVM_OFF_MSIX_VEC_PER_PF_MIN, BNXT_NVM_SHARED_CFG, 7},
+        NVM_OFF_MSIX_VEC_PER_PF_MIN, BNXT_NVM_SHARED_CFG, 7, 4},
        {BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK, NVM_OFF_DIS_GRE_VER_CHECK,
-        BNXT_NVM_SHARED_CFG, 1},
+        BNXT_NVM_SHARED_CFG, 1, 1},
 };
 
+union bnxt_nvm_data {
+       u8      val8;
+       __le32  val32;
+};
+
+static void bnxt_copy_to_nvm_data(union bnxt_nvm_data *dst,
+                                 union devlink_param_value *src,
+                                 int nvm_num_bits, int dl_num_bytes)
+{
+       u32 val32 = 0;
+
+       if (nvm_num_bits == 1) {
+               dst->val8 = src->vbool;
+               return;
+       }
+       if (dl_num_bytes == 4)
+               val32 = src->vu32;
+       else if (dl_num_bytes == 2)
+               val32 = (u32)src->vu16;
+       else if (dl_num_bytes == 1)
+               val32 = (u32)src->vu8;
+       dst->val32 = cpu_to_le32(val32);
+}
+
+static void bnxt_copy_from_nvm_data(union devlink_param_value *dst,
+                                   union bnxt_nvm_data *src,
+                                   int nvm_num_bits, int dl_num_bytes)
+{
+       u32 val32;
+
+       if (nvm_num_bits == 1) {
+               dst->vbool = src->val8;
+               return;
+       }
+       val32 = le32_to_cpu(src->val32);
+       if (dl_num_bytes == 4)
+               dst->vu32 = val32;
+       else if (dl_num_bytes == 2)
+               dst->vu16 = (u16)val32;
+       else if (dl_num_bytes == 1)
+               dst->vu8 = (u8)val32;
+}
+
 static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
                             int msg_len, union devlink_param_value *val)
 {
        struct hwrm_nvm_get_variable_input *req = msg;
-       void *data_addr = NULL, *buf = NULL;
        struct bnxt_dl_nvm_param nvm_param;
-       int bytesize, idx = 0, rc, i;
+       union bnxt_nvm_data *data;
        dma_addr_t data_dma_addr;
+       int idx = 0, rc, i;
 
        /* Get/Set NVM CFG parameter is supported only on PFs */
        if (BNXT_VF(bp))
@@ -254,47 +292,31 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
        else if (nvm_param.dir_type == BNXT_NVM_FUNC_CFG)
                idx = bp->pf.fw_fid - BNXT_FIRST_PF_FID;
 
-       bytesize = roundup(nvm_param.num_bits, BITS_PER_BYTE) / BITS_PER_BYTE;
-       switch (bytesize) {
-       case 1:
-               if (nvm_param.num_bits == 1)
-                       buf = &val->vbool;
-               else
-                       buf = &val->vu8;
-               break;
-       case 2:
-               buf = &val->vu16;
-               break;
-       case 4:
-               buf = &val->vu32;
-               break;
-       default:
-               return -EFAULT;
-       }
-
-       data_addr = dma_alloc_coherent(&bp->pdev->dev, bytesize,
-                                      &data_dma_addr, GFP_KERNEL);
-       if (!data_addr)
+       data = dma_alloc_coherent(&bp->pdev->dev, sizeof(*data),
+                                 &data_dma_addr, GFP_KERNEL);
+       if (!data)
                return -ENOMEM;
 
        req->dest_data_addr = cpu_to_le64(data_dma_addr);
-       req->data_len = cpu_to_le16(nvm_param.num_bits);
+       req->data_len = cpu_to_le16(nvm_param.nvm_num_bits);
        req->option_num = cpu_to_le16(nvm_param.offset);
        req->index_0 = cpu_to_le16(idx);
        if (idx)
                req->dimensions = cpu_to_le16(1);
 
        if (req->req_type == cpu_to_le16(HWRM_NVM_SET_VARIABLE)) {
-               memcpy(data_addr, buf, bytesize);
+               bnxt_copy_to_nvm_data(data, val, nvm_param.nvm_num_bits,
+                                     nvm_param.dl_num_bytes);
                rc = hwrm_send_message(bp, msg, msg_len, HWRM_CMD_TIMEOUT);
        } else {
                rc = hwrm_send_message_silent(bp, msg, msg_len,
                                              HWRM_CMD_TIMEOUT);
+               if (!rc)
+                       bnxt_copy_from_nvm_data(val, data,
+                                               nvm_param.nvm_num_bits,
+                                               nvm_param.dl_num_bytes);
        }
-       if (!rc && req->req_type == cpu_to_le16(HWRM_NVM_GET_VARIABLE))
-               memcpy(buf, data_addr, bytesize);
-
-       dma_free_coherent(&bp->pdev->dev, bytesize, data_addr, data_dma_addr);
+       dma_free_coherent(&bp->pdev->dev, sizeof(*data), data, data_dma_addr);
        if (rc == -EACCES)
                netdev_err(bp->dev, "PF does not have admin privileges to modify NVM config\n");
        return rc;
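The reworked bnxt_hwrm_nvm_req() above always allocates a fixed-size union bnxt_nvm_data with dma_alloc_coherent() and converts to and from little-endian through the two copy helpers, instead of sizing a raw buffer per parameter. A hedged sketch of that coherent scratch-buffer pattern, with union my_data and my_read_u32() as illustrative names:

#include <linux/dma-mapping.h>
#include <linux/types.h>

union my_data {
	u8	val8;
	__le32	val32;
};

static int my_read_u32(struct device *dev, u32 *out)
{
	union my_data *data;
	dma_addr_t dma;

	data = dma_alloc_coherent(dev, sizeof(*data), &dma, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	/* ... hand 'dma' to the firmware request and wait for completion ... */

	*out = le32_to_cpu(data->val32);
	dma_free_coherent(dev, sizeof(*data), data, dma);
	return 0;
}
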
index b97e0baeb42d6b2a07d9af7770e551429b90df1b..2f4fd0a7d04bfae168cc9aefc6e0faac0eacd7e4 100644 (file)
@@ -52,7 +52,8 @@ struct bnxt_dl_nvm_param {
        u16 id;
        u16 offset;
        u16 dir_type;
-       u16 num_bits;
+       u16 nvm_num_bits;
+       u8 dl_num_bytes;
 };
 
 void bnxt_devlink_health_report(struct bnxt *bp, unsigned long event);
index 12cb77ef1081b6df50c0328eee989827bce8e5a2..0f138280315a82f753de8b1243a8ef2331228a31 100644 (file)
@@ -2018,6 +2018,8 @@ static void bcmgenet_link_intr_enable(struct bcmgenet_priv *priv)
         */
        if (priv->internal_phy) {
                int0_enable |= UMAC_IRQ_LINK_EVENT;
+               if (GENET_IS_V1(priv) || GENET_IS_V2(priv) || GENET_IS_V3(priv))
+                       int0_enable |= UMAC_IRQ_PHY_DET_R;
        } else if (priv->ext_phy) {
                int0_enable |= UMAC_IRQ_LINK_EVENT;
        } else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
@@ -2611,11 +2613,14 @@ static void bcmgenet_irq_task(struct work_struct *work)
        priv->irq0_stat = 0;
        spin_unlock_irq(&priv->lock);
 
+       if (status & UMAC_IRQ_PHY_DET_R &&
+           priv->dev->phydev->autoneg != AUTONEG_ENABLE)
+               phy_init_hw(priv->dev->phydev);
+
        /* Link UP/DOWN event */
-       if (status & UMAC_IRQ_LINK_EVENT) {
-               priv->dev->phydev->link = !!(status & UMAC_IRQ_LINK_UP);
+       if (status & UMAC_IRQ_LINK_EVENT)
                phy_mac_interrupt(priv->dev->phydev);
-       }
+
 }
 
 /* bcmgenet_isr1: handle Rx and Tx priority queues */
@@ -2710,7 +2715,7 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
        }
 
        /* all other interested interrupts handled in bottom half */
-       status &= UMAC_IRQ_LINK_EVENT;
+       status &= (UMAC_IRQ_LINK_EVENT | UMAC_IRQ_PHY_DET_R);
        if (status) {
                /* Save irq status for bottom-half processing. */
                spin_lock_irqsave(&priv->lock, flags);
@@ -2874,6 +2879,12 @@ static int bcmgenet_open(struct net_device *dev)
        if (priv->internal_phy)
                bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
 
+       ret = bcmgenet_mii_connect(dev);
+       if (ret) {
+               netdev_err(dev, "failed to connect to PHY\n");
+               goto err_clk_disable;
+       }
+
        /* take MAC out of reset */
        bcmgenet_umac_reset(priv);
 
@@ -2883,6 +2894,12 @@ static int bcmgenet_open(struct net_device *dev)
        reg = bcmgenet_umac_readl(priv, UMAC_CMD);
        priv->crc_fwd_en = !!(reg & CMD_CRC_FWD);
 
+       ret = bcmgenet_mii_config(dev, true);
+       if (ret) {
+               netdev_err(dev, "unsupported PHY\n");
+               goto err_disconnect_phy;
+       }
+
        bcmgenet_set_hw_addr(priv, dev->dev_addr);
 
        if (priv->internal_phy) {
@@ -2898,7 +2915,7 @@ static int bcmgenet_open(struct net_device *dev)
        ret = bcmgenet_init_dma(priv);
        if (ret) {
                netdev_err(dev, "failed to initialize DMA\n");
-               goto err_clk_disable;
+               goto err_disconnect_phy;
        }
 
        /* Always enable ring 16 - descriptor ring */
@@ -2921,25 +2938,19 @@ static int bcmgenet_open(struct net_device *dev)
                goto err_irq0;
        }
 
-       ret = bcmgenet_mii_probe(dev);
-       if (ret) {
-               netdev_err(dev, "failed to connect to PHY\n");
-               goto err_irq1;
-       }
-
        bcmgenet_netif_start(dev);
 
        netif_tx_start_all_queues(dev);
 
        return 0;
 
-err_irq1:
-       free_irq(priv->irq1, priv);
 err_irq0:
        free_irq(priv->irq0, priv);
 err_fini_dma:
        bcmgenet_dma_teardown(priv);
        bcmgenet_fini_dma(priv);
+err_disconnect_phy:
+       phy_disconnect(dev->phydev);
 err_clk_disable:
        if (priv->internal_phy)
                bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
@@ -3620,6 +3631,8 @@ static int bcmgenet_resume(struct device *d)
        if (priv->internal_phy)
                bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
 
+       phy_init_hw(dev->phydev);
+
        bcmgenet_umac_reset(priv);
 
        init_umac(priv);
@@ -3628,8 +3641,6 @@ static int bcmgenet_resume(struct device *d)
        if (priv->wolopts)
                clk_disable_unprepare(priv->clk_wol);
 
-       phy_init_hw(dev->phydev);
-
        /* Speed settings must be restored */
        bcmgenet_mii_config(priv->dev, false);
 
index 4a8fc03d82fd4676731f6fb93c89d22c799fc64c..7fbf573d8d52a8ff45c1399ece76a3808490c624 100644 (file)
@@ -366,6 +366,7 @@ struct bcmgenet_mib_counters {
 #define  EXT_PWR_DOWN_PHY_EN           (1 << 20)
 
 #define EXT_RGMII_OOB_CTRL             0x0C
+#define  RGMII_MODE_EN_V123            (1 << 0)
 #define  RGMII_LINK                    (1 << 4)
 #define  OOB_DISABLE                   (1 << 5)
 #define  RGMII_MODE_EN                 (1 << 6)
@@ -719,8 +720,8 @@ GENET_IO_MACRO(rbuf, GENET_RBUF_OFF);
 
 /* MDIO routines */
 int bcmgenet_mii_init(struct net_device *dev);
+int bcmgenet_mii_connect(struct net_device *dev);
 int bcmgenet_mii_config(struct net_device *dev, bool init);
-int bcmgenet_mii_probe(struct net_device *dev);
 void bcmgenet_mii_exit(struct net_device *dev);
 void bcmgenet_phy_power_set(struct net_device *dev, bool enable);
 void bcmgenet_mii_setup(struct net_device *dev);
index 970e478a9017f8ef6437870c5601f9942bd44f29..17bb8d60a157cb4da2a792521c9bcb8aa97b851d 100644 (file)
@@ -173,6 +173,46 @@ static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv)
                                          bcmgenet_fixed_phy_link_update);
 }
 
+int bcmgenet_mii_connect(struct net_device *dev)
+{
+       struct bcmgenet_priv *priv = netdev_priv(dev);
+       struct device_node *dn = priv->pdev->dev.of_node;
+       struct phy_device *phydev;
+       u32 phy_flags = 0;
+       int ret;
+
+       /* Communicate the integrated PHY revision */
+       if (priv->internal_phy)
+               phy_flags = priv->gphy_rev;
+
+       /* Initialize link state variables that bcmgenet_mii_setup() uses */
+       priv->old_link = -1;
+       priv->old_speed = -1;
+       priv->old_duplex = -1;
+       priv->old_pause = -1;
+
+       if (dn) {
+               phydev = of_phy_connect(dev, priv->phy_dn, bcmgenet_mii_setup,
+                                       phy_flags, priv->phy_interface);
+               if (!phydev) {
+                       pr_err("could not attach to PHY\n");
+                       return -ENODEV;
+               }
+       } else {
+               phydev = dev->phydev;
+               phydev->dev_flags = phy_flags;
+
+               ret = phy_connect_direct(dev, phydev, bcmgenet_mii_setup,
+                                        priv->phy_interface);
+               if (ret) {
+                       pr_err("could not attach to PHY\n");
+                       return -ENODEV;
+               }
+       }
+
+       return 0;
+}
+
 int bcmgenet_mii_config(struct net_device *dev, bool init)
 {
        struct bcmgenet_priv *priv = netdev_priv(dev);
@@ -258,74 +298,29 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
         */
        if (priv->ext_phy) {
                reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL);
-               reg |= RGMII_MODE_EN | id_mode_dis;
+               reg |= id_mode_dis;
+               if (GENET_IS_V1(priv) || GENET_IS_V2(priv) || GENET_IS_V3(priv))
+                       reg |= RGMII_MODE_EN_V123;
+               else
+                       reg |= RGMII_MODE_EN;
                bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
        }
 
-       if (init)
-               dev_info(kdev, "configuring instance for %s\n", phy_name);
-
-       return 0;
-}
-
-int bcmgenet_mii_probe(struct net_device *dev)
-{
-       struct bcmgenet_priv *priv = netdev_priv(dev);
-       struct device_node *dn = priv->pdev->dev.of_node;
-       struct phy_device *phydev;
-       u32 phy_flags;
-       int ret;
-
-       /* Communicate the integrated PHY revision */
-       phy_flags = priv->gphy_rev;
-
-       /* Initialize link state variables that bcmgenet_mii_setup() uses */
-       priv->old_link = -1;
-       priv->old_speed = -1;
-       priv->old_duplex = -1;
-       priv->old_pause = -1;
-
-       if (dn) {
-               phydev = of_phy_connect(dev, priv->phy_dn, bcmgenet_mii_setup,
-                                       phy_flags, priv->phy_interface);
-               if (!phydev) {
-                       pr_err("could not attach to PHY\n");
-                       return -ENODEV;
-               }
-       } else {
-               phydev = dev->phydev;
-               phydev->dev_flags = phy_flags;
+       if (init) {
+               linkmode_copy(phydev->advertising, phydev->supported);
 
-               ret = phy_connect_direct(dev, phydev, bcmgenet_mii_setup,
-                                        priv->phy_interface);
-               if (ret) {
-                       pr_err("could not attach to PHY\n");
-                       return -ENODEV;
-               }
-       }
+               /* The internal PHY has its link interrupts routed to the
+                * Ethernet MAC ISRs. On GENETv5 there is a hardware issue
+                * that prevents the signaling of link UP interrupts when
+                * the link operates at 10Mbps, so fallback to polling for
+                * those versions of GENET.
+                */
+               if (priv->internal_phy && !GENET_IS_V5(priv))
+                       phydev->irq = PHY_IGNORE_INTERRUPT;
 
-       /* Configure port multiplexer based on what the probed PHY device since
-        * reading the 'max-speed' property determines the maximum supported
-        * PHY speed which is needed for bcmgenet_mii_config() to configure
-        * things appropriately.
-        */
-       ret = bcmgenet_mii_config(dev, true);
-       if (ret) {
-               phy_disconnect(dev->phydev);
-               return ret;
+               dev_info(kdev, "configuring instance for %s\n", phy_name);
        }
 
-       linkmode_copy(phydev->advertising, phydev->supported);
-
-       /* The internal PHY has its link interrupts routed to the
-        * Ethernet MAC ISRs. On GENETv5 there is a hardware issue
-        * that prevents the signaling of link UP interrupts when
-        * the link operates at 10Mbps, so fallback to polling for
-        * those versions of GENET.
-        */
-       if (priv->internal_phy && !GENET_IS_V5(priv))
-               dev->phydev->irq = PHY_IGNORE_INTERRUPT;
-
        return 0;
 }
 
index 8e8d557901a970beea5287cf1ebfaa2ef1910f5b..1e1b774e19536611f54b641068bbfda9925c5601 100644 (file)
@@ -3405,17 +3405,17 @@ static int macb_clk_init(struct platform_device *pdev, struct clk **pclk,
                return err;
        }
 
-       *tx_clk = devm_clk_get(&pdev->dev, "tx_clk");
+       *tx_clk = devm_clk_get_optional(&pdev->dev, "tx_clk");
        if (IS_ERR(*tx_clk))
-               *tx_clk = NULL;
+               return PTR_ERR(*tx_clk);
 
-       *rx_clk = devm_clk_get(&pdev->dev, "rx_clk");
+       *rx_clk = devm_clk_get_optional(&pdev->dev, "rx_clk");
        if (IS_ERR(*rx_clk))
-               *rx_clk = NULL;
+               return PTR_ERR(*rx_clk);
 
-       *tsu_clk = devm_clk_get(&pdev->dev, "tsu_clk");
+       *tsu_clk = devm_clk_get_optional(&pdev->dev, "tsu_clk");
        if (IS_ERR(*tsu_clk))
-               *tsu_clk = NULL;
+               return PTR_ERR(*tsu_clk);
 
        err = clk_prepare_enable(*pclk);
        if (err) {
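The macb hunk above replaces the "treat any devm_clk_get() failure as a missing clock" fallback with devm_clk_get_optional(), which returns NULL only when the clock is genuinely absent and lets real errors (including -EPROBE_DEFER) propagate. A hedged sketch of that pattern, reusing the "tx_clk" name from the hunk inside an illustrative helper:

#include <linux/clk.h>
#include <linux/device.h>

static int my_get_tx_clk(struct device *dev, struct clk **tx_clk)
{
	*tx_clk = devm_clk_get_optional(dev, "tx_clk");
	if (IS_ERR(*tx_clk))
		return PTR_ERR(*tx_clk);	/* real error, e.g. -EPROBE_DEFER */

	/* NULL just means the DT omitted the clock; a NULL clk is a no-op here */
	return clk_prepare_enable(*tx_clk);
}
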
index be2bafc7beeb3372f9aa5342420c3e3988d675a7..a04eccbc78e8aba124bcd5789a0e40c27ce3a230 100644 (file)
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
 /* cavium_ptp.h - PTP 1588 clock on Cavium hardware
  * Copyright (c) 2003-2015, 2017 Cavium, Inc.
  */
index 5b602243d573fe745578e9390209a1813a1b6d57..86b528d8364c0712d8c26f684f140901463089a4 100644 (file)
@@ -137,13 +137,12 @@ static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
 static int alloc_uld_rxqs(struct adapter *adap,
                          struct sge_uld_rxq_info *rxq_info, bool lro)
 {
-       struct sge *s = &adap->sge;
        unsigned int nq = rxq_info->nrxq + rxq_info->nciq;
+       int i, err, msi_idx, que_idx = 0, bmap_idx = 0;
        struct sge_ofld_rxq *q = rxq_info->uldrxq;
        unsigned short *ids = rxq_info->rspq_id;
-       unsigned int bmap_idx = 0;
+       struct sge *s = &adap->sge;
        unsigned int per_chan;
-       int i, err, msi_idx, que_idx = 0;
 
        per_chan = rxq_info->nrxq / adap->params.nports;
 
@@ -161,6 +160,10 @@ static int alloc_uld_rxqs(struct adapter *adap,
 
                if (msi_idx >= 0) {
                        bmap_idx = get_msix_idx_from_bmap(adap);
+                       if (bmap_idx < 0) {
+                               err = -ENOSPC;
+                               goto freeout;
+                       }
                        msi_idx = adap->msix_info_ulds[bmap_idx].idx;
                }
                err = t4_sge_alloc_rxq(adap, &q->rspq, false,
@@ -692,10 +695,10 @@ static void uld_init(struct adapter *adap, struct cxgb4_lld_info *lld)
        lld->write_cmpl_support = adap->params.write_cmpl_support;
 }
 
-static void uld_attach(struct adapter *adap, unsigned int uld)
+static int uld_attach(struct adapter *adap, unsigned int uld)
 {
-       void *handle;
        struct cxgb4_lld_info lli;
+       void *handle;
 
        uld_init(adap, &lli);
        uld_queue_init(adap, uld, &lli);
@@ -705,7 +708,7 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
                dev_warn(adap->pdev_dev,
                         "could not attach to the %s driver, error %ld\n",
                         adap->uld[uld].name, PTR_ERR(handle));
-               return;
+               return PTR_ERR(handle);
        }
 
        adap->uld[uld].handle = handle;
@@ -713,22 +716,22 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
 
        if (adap->flags & CXGB4_FULL_INIT_DONE)
                adap->uld[uld].state_change(handle, CXGB4_STATE_UP);
+
+       return 0;
 }
 
-/**
- *     cxgb4_register_uld - register an upper-layer driver
- *     @type: the ULD type
- *     @p: the ULD methods
+/* cxgb4_register_uld - register an upper-layer driver
+ * @type: the ULD type
+ * @p: the ULD methods
  *
- *     Registers an upper-layer driver with this driver and notifies the ULD
- *     about any presently available devices that support its type.  Returns
- *     %-EBUSY if a ULD of the same type is already registered.
+ * Registers an upper-layer driver with this driver and notifies the ULD
+ * about any presently available devices that support its type.
  */
 void cxgb4_register_uld(enum cxgb4_uld type,
                        const struct cxgb4_uld_info *p)
 {
-       int ret = 0;
        struct adapter *adap;
+       int ret = 0;
 
        if (type >= CXGB4_ULD_MAX)
                return;
@@ -760,8 +763,12 @@ void cxgb4_register_uld(enum cxgb4_uld type,
                if (ret)
                        goto free_irq;
                adap->uld[type] = *p;
-               uld_attach(adap, type);
+               ret = uld_attach(adap, type);
+               if (ret)
+                       goto free_txq;
                continue;
+free_txq:
+               release_sge_txq_uld(adap, type);
 free_irq:
                if (adap->flags & CXGB4_FULL_INIT_DONE)
                        quiesce_rx_uld(adap, type);
index b3da81e90132fd74d26b007ca5414a066547774f..928bfea5457bb2856e0781872e524476afec96cb 100644 (file)
@@ -3791,15 +3791,11 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
         * write the CIDX Updates into the Status Page at the end of the
         * TX Queue.
         */
-       c.autoequiqe_to_viid = htonl((dbqt
-                                     ? FW_EQ_ETH_CMD_AUTOEQUIQE_F
-                                     : FW_EQ_ETH_CMD_AUTOEQUEQE_F) |
+       c.autoequiqe_to_viid = htonl(FW_EQ_ETH_CMD_AUTOEQUEQE_F |
                                     FW_EQ_ETH_CMD_VIID_V(pi->viid));
 
        c.fetchszm_to_iqid =
-               htonl(FW_EQ_ETH_CMD_HOSTFCMODE_V(dbqt
-                                                ? HOSTFCMODE_INGRESS_QUEUE_X
-                                                : HOSTFCMODE_STATUS_PAGE_X) |
+               htonl(FW_EQ_ETH_CMD_HOSTFCMODE_V(HOSTFCMODE_STATUS_PAGE_X) |
                      FW_EQ_ETH_CMD_PCIECHN_V(pi->tx_chan) |
                      FW_EQ_ETH_CMD_FETCHRO_F | FW_EQ_ETH_CMD_IQID_V(iqid));
 
index 0b12f89bf89a313f574f8e8a7295dbc112bcdeab..9fdf77d5eb3740982c28f5758b595aec33dbf692 100644 (file)
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
 /* Register definitions for Gemini GMAC Ethernet device driver
  *
  * Copyright (C) 2006 Storlink, Corp.
index 9b7af94a40bb028414a178c1cc7f77daa125f3e5..96e9565f1e08a165ac48705265d93259d58b1060 100644 (file)
@@ -727,6 +727,18 @@ static netdev_tx_t ftgmac100_hard_start_xmit(struct sk_buff *skb,
         */
        nfrags = skb_shinfo(skb)->nr_frags;
 
+       /* Setup HW checksumming */
+       csum_vlan = 0;
+       if (skb->ip_summed == CHECKSUM_PARTIAL &&
+           !ftgmac100_prep_tx_csum(skb, &csum_vlan))
+               goto drop;
+
+       /* Add VLAN tag */
+       if (skb_vlan_tag_present(skb)) {
+               csum_vlan |= FTGMAC100_TXDES1_INS_VLANTAG;
+               csum_vlan |= skb_vlan_tag_get(skb) & 0xffff;
+       }
+
        /* Get header len */
        len = skb_headlen(skb);
 
@@ -753,19 +765,6 @@ static netdev_tx_t ftgmac100_hard_start_xmit(struct sk_buff *skb,
        if (nfrags == 0)
                f_ctl_stat |= FTGMAC100_TXDES0_LTS;
        txdes->txdes3 = cpu_to_le32(map);
-
-       /* Setup HW checksumming */
-       csum_vlan = 0;
-       if (skb->ip_summed == CHECKSUM_PARTIAL &&
-           !ftgmac100_prep_tx_csum(skb, &csum_vlan))
-               goto drop;
-
-       /* Add VLAN tag */
-       if (skb_vlan_tag_present(skb)) {
-               csum_vlan |= FTGMAC100_TXDES1_INS_VLANTAG;
-               csum_vlan |= skb_vlan_tag_get(skb) & 0xffff;
-       }
-
        txdes->txdes1 = cpu_to_le32(csum_vlan);
 
        /* Next descriptor */
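
The ftgmac100 reordering above makes sure a packet that fails checksum preparation is dropped before any descriptor or DMA mapping work has been committed. A minimal, self-contained model of that "validate metadata first, commit resources second" ordering, with invented names:

#include <stdbool.h>
#include <stdio.h>

/* Toy TX path: compute per-packet metadata up front so an early drop
 * leaks no hardware resources. */
struct pkt { bool needs_csum; bool csum_ok; };

static bool prep_csum(const struct pkt *p, unsigned int *flags)
{
	if (p->needs_csum && !p->csum_ok)
		return false;	/* cannot offload: caller drops the packet */
	if (p->needs_csum)
		*flags |= 0x1;
	return true;
}

static int xmit(const struct pkt *p)
{
	unsigned int flags = 0;

	if (!prep_csum(p, &flags))
		return -1;	/* dropped before any descriptor/DMA work */

	/* only now would descriptors be filled and buffers mapped */
	printf("queued packet, flags=%#x\n", flags);
	return 0;
}

int main(void)
{
	struct pkt good = { .needs_csum = true, .csum_ok = true };
	struct pkt bad  = { .needs_csum = true, .csum_ok = false };

	printf("good=%d\n", xmit(&good));
	printf("bad=%d\n", xmit(&bad));
	return 0;
}
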
index 162d7d8fb2950c643af4c6d68d1bdafe44ffcf46..19379bae014430e3828f037dc7dfa3ea47a9a1d7 100644 (file)
@@ -1235,6 +1235,8 @@ static void dpaa2_eth_set_rx_taildrop(struct dpaa2_eth_priv *priv, bool enable)
        priv->rx_td_enabled = enable;
 }
 
+static void update_tx_fqids(struct dpaa2_eth_priv *priv);
+
 static int link_state_update(struct dpaa2_eth_priv *priv)
 {
        struct dpni_link_state state = {0};
@@ -1261,6 +1263,7 @@ static int link_state_update(struct dpaa2_eth_priv *priv)
                goto out;
 
        if (state.up) {
+               update_tx_fqids(priv);
                netif_carrier_on(priv->net_dev);
                netif_tx_start_all_queues(priv->net_dev);
        } else {
@@ -2533,6 +2536,47 @@ static int set_pause(struct dpaa2_eth_priv *priv)
        return 0;
 }
 
+static void update_tx_fqids(struct dpaa2_eth_priv *priv)
+{
+       struct dpni_queue_id qid = {0};
+       struct dpaa2_eth_fq *fq;
+       struct dpni_queue queue;
+       int i, j, err;
+
+       /* We only use Tx FQIDs for FQID-based enqueue, so check
+        * whether the DPNI version supports it before updating the FQIDs

+        */
+       if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_ENQUEUE_FQID_VER_MAJOR,
+                                  DPNI_ENQUEUE_FQID_VER_MINOR) < 0)
+               return;
+
+       for (i = 0; i < priv->num_fqs; i++) {
+               fq = &priv->fq[i];
+               if (fq->type != DPAA2_TX_CONF_FQ)
+                       continue;
+               for (j = 0; j < dpaa2_eth_tc_count(priv); j++) {
+                       err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
+                                            DPNI_QUEUE_TX, j, fq->flowid,
+                                            &queue, &qid);
+                       if (err)
+                               goto out_err;
+
+                       fq->tx_fqid[j] = qid.fqid;
+                       if (fq->tx_fqid[j] == 0)
+                               goto out_err;
+               }
+       }
+
+       priv->enqueue = dpaa2_eth_enqueue_fq;
+
+       return;
+
+out_err:
+       netdev_info(priv->net_dev,
+                   "Error reading Tx FQID, fallback to QDID-based enqueue\n");
+       priv->enqueue = dpaa2_eth_enqueue_qd;
+}
+
 /* Configure the DPNI object this interface is associated with */
 static int setup_dpni(struct fsl_mc_device *ls_dev)
 {
@@ -3306,6 +3350,9 @@ static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg)
        if (status & DPNI_IRQ_EVENT_LINK_CHANGED)
                link_state_update(netdev_priv(net_dev));
 
+       if (status & DPNI_IRQ_EVENT_ENDPOINT_CHANGED)
+               set_mac_addr(netdev_priv(net_dev));
+
        return IRQ_HANDLED;
 }
 
@@ -3331,7 +3378,8 @@ static int setup_irqs(struct fsl_mc_device *ls_dev)
        }
 
        err = dpni_set_irq_mask(ls_dev->mc_io, 0, ls_dev->mc_handle,
-                               DPNI_IRQ_INDEX, DPNI_IRQ_EVENT_LINK_CHANGED);
+                               DPNI_IRQ_INDEX, DPNI_IRQ_EVENT_LINK_CHANGED |
+                               DPNI_IRQ_EVENT_ENDPOINT_CHANGED);
        if (err < 0) {
                dev_err(&ls_dev->dev, "dpni_set_irq_mask(): %d\n", err);
                goto free_irq;
index ff2e177395d49498cfefccb08314b8b98697e568..df2458a5e9ef5b0a444cccee404a89993d3c747e 100644 (file)
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Copyright 2018 NXP
  */
index fd583911b6c020ad026fe79eb3a820231b92b265..ee0711d06b3a78b0cdac507fa3b3e52feda0997c 100644 (file)
@@ -133,9 +133,12 @@ int dpni_reset(struct fsl_mc_io    *mc_io,
  */
 #define DPNI_IRQ_INDEX                         0
 /**
- * IRQ event - indicates a change in link state
+ * IRQ events:
+ *       DPNI_IRQ_EVENT_LINK_CHANGED - indicates a change in link state
+ *       DPNI_IRQ_EVENT_ENDPOINT_CHANGED - indicates a change in endpoint
  */
 #define DPNI_IRQ_EVENT_LINK_CHANGED            0x00000001
+#define DPNI_IRQ_EVENT_ENDPOINT_CHANGED                0x00000002
 
 int dpni_set_irq_enable(struct fsl_mc_io       *mc_io,
                        u32                     cmd_flags,
index 720cd50f5895ce77818fedd256ced50bbb1ab3cb..4ac05bfef338098b2bba5f1c697b36f816107ce8 100644 (file)
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Copyright 2013-2016 Freescale Semiconductor Inc.
  * Copyright 2016-2018 NXP
index be7914c1634dfcb2ba8658506ce6c504f0a36bc7..311c184e1aef675f02ea14892225ce0947d3baee 100644 (file)
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Copyright 2013-2016 Freescale Semiconductor Inc.
  * Copyright 2016-2018 NXP
index d4d4c72adf498e78ef8de5af9e6eb76e83ae4c4d..22c01b224baa8b500863e982373f04e09168c0ac 100644 (file)
@@ -3558,7 +3558,7 @@ fec_probe(struct platform_device *pdev)
 
        for (i = 0; i < irq_cnt; i++) {
                snprintf(irq_name, sizeof(irq_name), "int%d", i);
-               irq = platform_get_irq_byname(pdev, irq_name);
+               irq = platform_get_irq_byname_optional(pdev, irq_name);
                if (irq < 0)
                        irq = platform_get_irq(pdev, i);
                if (irq < 0) {
index 19e2365be7d8fa19020aa687608d26441329df30..945643c026155f83fcb38ecfac40d7ef37c2d888 100644 (file)
@@ -600,9 +600,9 @@ void fec_ptp_init(struct platform_device *pdev, int irq_idx)
 
        INIT_DELAYED_WORK(&fep->time_keep, fec_time_keep);
 
-       irq = platform_get_irq_byname(pdev, "pps");
+       irq = platform_get_irq_byname_optional(pdev, "pps");
        if (irq < 0)
-               irq = platform_get_irq(pdev, irq_idx);
+               irq = platform_get_irq_optional(pdev, irq_idx);
        /* Failure to get an irq is not fatal,
         * only the PTP_CLOCK_PPS clock events should stop
         */
index 59564ac99d2a6df5c589d89521b5513f78ae16fc..edec61dfc8687fff6f4867e4463753d28aab202e 100644 (file)
@@ -289,6 +289,8 @@ static bool gve_rx(struct gve_rx_ring *rx, struct gve_rx_desc *rx_desc,
 
        len = be16_to_cpu(rx_desc->len) - GVE_RX_PAD;
        page_info = &rx->data.page_info[idx];
+       dma_sync_single_for_cpu(&priv->pdev->dev, rx->data.qpl->page_buses[idx],
+                               PAGE_SIZE, DMA_FROM_DEVICE);
 
        /* gvnic can only receive into registered segments. If the buffer
         * can't be recycled, our only choice is to copy the data out of
index 778b87b5a06c255eaff6544302aca044240814f6..0a9a7ee2a866838cce439172af87edc6ef0106e7 100644 (file)
@@ -390,7 +390,21 @@ static void gve_tx_fill_seg_desc(union gve_tx_desc *seg_desc,
        seg_desc->seg.seg_addr = cpu_to_be64(addr);
 }
 
-static int gve_tx_add_skb(struct gve_tx_ring *tx, struct sk_buff *skb)
+static void gve_dma_sync_for_device(struct device *dev, dma_addr_t *page_buses,
+                                   u64 iov_offset, u64 iov_len)
+{
+       dma_addr_t dma;
+       u64 addr;
+
+       for (addr = iov_offset; addr < iov_offset + iov_len;
+            addr += PAGE_SIZE) {
+               dma = page_buses[addr / PAGE_SIZE];
+               dma_sync_single_for_device(dev, dma, PAGE_SIZE, DMA_TO_DEVICE);
+       }
+}
+
+static int gve_tx_add_skb(struct gve_tx_ring *tx, struct sk_buff *skb,
+                         struct device *dev)
 {
        int pad_bytes, hlen, hdr_nfrags, payload_nfrags, l4_hdr_offset;
        union gve_tx_desc *pkt_desc, *seg_desc;
@@ -432,6 +446,9 @@ static int gve_tx_add_skb(struct gve_tx_ring *tx, struct sk_buff *skb)
        skb_copy_bits(skb, 0,
                      tx->tx_fifo.base + info->iov[hdr_nfrags - 1].iov_offset,
                      hlen);
+       gve_dma_sync_for_device(dev, tx->tx_fifo.qpl->page_buses,
+                               info->iov[hdr_nfrags - 1].iov_offset,
+                               info->iov[hdr_nfrags - 1].iov_len);
        copy_offset = hlen;
 
        for (i = payload_iov; i < payload_nfrags + payload_iov; i++) {
@@ -445,6 +462,9 @@ static int gve_tx_add_skb(struct gve_tx_ring *tx, struct sk_buff *skb)
                skb_copy_bits(skb, copy_offset,
                              tx->tx_fifo.base + info->iov[i].iov_offset,
                              info->iov[i].iov_len);
+               gve_dma_sync_for_device(dev, tx->tx_fifo.qpl->page_buses,
+                                       info->iov[i].iov_offset,
+                                       info->iov[i].iov_len);
                copy_offset += info->iov[i].iov_len;
        }
 
@@ -473,7 +493,7 @@ netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev)
                gve_tx_put_doorbell(priv, tx->q_resources, tx->req);
                return NETDEV_TX_BUSY;
        }
-       nsegs = gve_tx_add_skb(tx, skb);
+       nsegs = gve_tx_add_skb(tx, skb, &priv->pdev->dev);
 
        netdev_tx_sent_queue(tx->netdev_txq, skb->len);
        skb_tx_timestamp(skb);
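
The gve hunks above sync each page backing the copied FIFO range for the device before the doorbell is rung. A tiny standalone sketch of how such a byte range maps onto per-page sync calls (PAGE_SIZE and the printout are illustrative stand-ins for the real DMA API):

#include <stdio.h>

#define PAGE_SIZE 4096ULL

/* Walk a FIFO byte range page by page, the way the hunk above picks one
 * DMA handle per backing page to sync. */
static void sync_range_for_device(unsigned long long iov_offset,
				  unsigned long long iov_len)
{
	unsigned long long addr;

	for (addr = iov_offset; addr < iov_offset + iov_len; addr += PAGE_SIZE)
		printf("sync page index %llu\n", addr / PAGE_SIZE);
}

int main(void)
{
	/* A 6000-byte copy starting 100 bytes into page 2 spans pages 2 and 3. */
	sync_range_for_device(2 * PAGE_SIZE + 100, 6000);
	return 0;
}
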
index c84167447abe650c36186eae40343b2584ac4fbb..4606a7e4a6d19a2471adb6cdb2f2c5f810bc8c37 100644 (file)
@@ -237,6 +237,7 @@ struct hip04_priv {
        dma_addr_t rx_phys[RX_DESC_NUM];
        unsigned int rx_head;
        unsigned int rx_buf_size;
+       unsigned int rx_cnt_remaining;
 
        struct device_node *phy_node;
        struct phy_device *phy;
@@ -575,7 +576,6 @@ static int hip04_rx_poll(struct napi_struct *napi, int budget)
        struct hip04_priv *priv = container_of(napi, struct hip04_priv, napi);
        struct net_device *ndev = priv->ndev;
        struct net_device_stats *stats = &ndev->stats;
-       unsigned int cnt = hip04_recv_cnt(priv);
        struct rx_desc *desc;
        struct sk_buff *skb;
        unsigned char *buf;
@@ -588,8 +588,8 @@ static int hip04_rx_poll(struct napi_struct *napi, int budget)
 
        /* clean up tx descriptors */
        tx_remaining = hip04_tx_reclaim(ndev, false);
-
-       while (cnt && !last) {
+       priv->rx_cnt_remaining += hip04_recv_cnt(priv);
+       while (priv->rx_cnt_remaining && !last) {
                buf = priv->rx_buf[priv->rx_head];
                skb = build_skb(buf, priv->rx_buf_size);
                if (unlikely(!skb)) {
@@ -635,11 +635,13 @@ refill:
                hip04_set_recv_desc(priv, phys);
 
                priv->rx_head = RX_NEXT(priv->rx_head);
-               if (rx >= budget)
+               if (rx >= budget) {
+                       --priv->rx_cnt_remaining;
                        goto done;
+               }
 
-               if (--cnt == 0)
-                       cnt = hip04_recv_cnt(priv);
+               if (--priv->rx_cnt_remaining == 0)
+                       priv->rx_cnt_remaining += hip04_recv_cnt(priv);
        }
 
        if (!(priv->reg_inten & RCV_INT)) {
@@ -724,6 +726,7 @@ static int hip04_mac_open(struct net_device *ndev)
        int i;
 
        priv->rx_head = 0;
+       priv->rx_cnt_remaining = 0;
        priv->tx_head = 0;
        priv->tx_tail = 0;
        hip04_reset_ppe(priv);
@@ -1038,7 +1041,6 @@ static int hip04_remove(struct platform_device *pdev)
 
        hip04_free_ring(ndev, d);
        unregister_netdev(ndev);
-       free_irq(ndev->irq, ndev);
        of_node_put(priv->phy_node);
        cancel_work_sync(&priv->tx_timeout_task);
        free_netdev(ndev);
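
The hip04 change keeps the unprocessed receive count in the device state, so frames left over when the NAPI budget runs out are handled on the next poll instead of being forgotten. A toy model of that bookkeeping, with invented names and a fake hardware counter:

#include <stdio.h>

static int hw_pending = 5;	/* frames the "hardware" has reported */

static int hw_recv_cnt(void)
{
	int n = hw_pending;

	hw_pending = 0;		/* reading the counter clears it */
	return n;
}

static int rx_cnt_remaining;	/* carried across poll() calls */

static int poll(int budget)
{
	int done = 0;

	rx_cnt_remaining += hw_recv_cnt();
	while (rx_cnt_remaining && done < budget) {
		done++;			/* "process" one frame */
		rx_cnt_remaining--;
	}
	return done;
}

int main(void)
{
	int done;

	done = poll(3);
	printf("first poll:  %d frames, %d left\n", done, rx_cnt_remaining);
	done = poll(3);
	printf("second poll: %d frames, %d left\n", done, rx_cnt_remaining);
	return 0;
}
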
index c4b7bf851a28bc0258b6e24acccfd8f5ddc00323..75ccc1e7076bde9a5bd681f0e6d3370f333f54db 100644 (file)
@@ -32,6 +32,8 @@
 
 #define HNAE3_MOD_VERSION "1.0"
 
+#define HNAE3_MIN_VECTOR_NUM   2 /* first one for misc, another for IO */
+
 /* Device IDs */
 #define HNAE3_DEV_ID_GE                                0xA220
 #define HNAE3_DEV_ID_25GE                      0xA221
index fd7f94372ff0dfae9d37103a8b4b8d93f7bed7aa..e02e01bd9effec698b740baa35e56a685eb325fb 100644 (file)
@@ -906,6 +906,9 @@ static int hclge_query_pf_resource(struct hclge_dev *hdev)
                hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
                                HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
 
+               /* The NIC's MSI-X vector count always equals the RoCE's. */
+               hdev->num_nic_msi = hdev->num_roce_msi;
+
                /* PF should have NIC vectors and Roce vectors,
                 * NIC vectors are queued before Roce vectors.
                 */
@@ -915,6 +918,15 @@ static int hclge_query_pf_resource(struct hclge_dev *hdev)
                hdev->num_msi =
                hnae3_get_field(__le16_to_cpu(req->pf_intr_vector_number),
                                HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
+
+               hdev->num_nic_msi = hdev->num_msi;
+       }
+
+       if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
+               dev_err(&hdev->pdev->dev,
+                       "Just %u msi resources, not enough for pf(min:2).\n",
+                       hdev->num_nic_msi);
+               return -EINVAL;
        }
 
        return 0;
@@ -1507,6 +1519,10 @@ static int  hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
        kinfo->rss_size = min_t(u16, hdev->rss_size_max,
                                vport->alloc_tqps / hdev->tm_info.num_tc);
 
+       /* ensure a one-to-one mapping between IRQ and queue by default */
+       kinfo->rss_size = min_t(u16, kinfo->rss_size,
+                               (hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);
+
        return 0;
 }
 
@@ -2285,7 +2301,8 @@ static int hclge_init_msi(struct hclge_dev *hdev)
        int vectors;
        int i;
 
-       vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
+       vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
+                                       hdev->num_msi,
                                        PCI_IRQ_MSI | PCI_IRQ_MSIX);
        if (vectors < 0) {
                dev_err(&pdev->dev,
@@ -2300,6 +2317,7 @@ static int hclge_init_msi(struct hclge_dev *hdev)
 
        hdev->num_msi = vectors;
        hdev->num_msi_left = vectors;
+
        hdev->base_msi_vector = pdev->irq;
        hdev->roce_base_vector = hdev->base_msi_vector +
                                hdev->roce_base_msix_offset;
@@ -3903,6 +3921,7 @@ static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
        int alloc = 0;
        int i, j;
 
+       vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
        vector_num = min(hdev->num_msi_left, vector_num);
 
        for (j = 0; j < vector_num; j++) {
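
The hclge hunks above cap both rss_size and the requested vector count by the NIC MSI-X vectors actually available, keeping one vector reserved for misc and dividing the rest across the traffic classes. A small worked example of that arithmetic with made-up numbers:

#include <stdio.h>

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

int main(void)
{
	/* Invented example values, not taken from any real adapter. */
	unsigned int rss_size_max = 16;	/* hardware RSS limit        */
	unsigned int alloc_tqps   = 32;	/* task queue pairs per port */
	unsigned int num_tc       = 4;	/* traffic classes           */
	unsigned int num_nic_msi  = 10;	/* NIC MSI-X vectors         */
	unsigned int rss_size;

	rss_size = min_u(rss_size_max, alloc_tqps / num_tc);		/* 8 */
	/* one vector stays reserved for misc, the rest shared by the TCs */
	rss_size = min_u(rss_size, (num_nic_msi - 1) / num_tc);	/* 2 */

	printf("rss_size = %u\n", rss_size);
	return 0;
}
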
index 3e9574a9e22dcbac92ef06832ef266d3687a42f0..c3d56b872ed7342893c93a31524bf3844e1e3acf 100644 (file)
@@ -763,6 +763,7 @@ struct hclge_dev {
        u32 base_msi_vector;
        u16 *vector_status;
        int *vector_irq;
+       u16 num_nic_msi;        /* Num of nic vectors for this PF */
        u16 num_roce_msi;       /* Num of roce vectors for this PF */
        int roce_base_vector;
 
index 9f0e35f277895a00898fa475e2b70c5f6f17c8b3..62399cc1c5a630c4610ce044bd8d2054f8bde1b1 100644 (file)
@@ -537,9 +537,16 @@ static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
                kinfo->rss_size = kinfo->req_rss_size;
        } else if (kinfo->rss_size > max_rss_size ||
                   (!kinfo->req_rss_size && kinfo->rss_size < max_rss_size)) {
+               /* If the user has not requested an rss_size, cap it by the
+                * number of valid MSI vectors so that each TQP maps to its
+                * own IRQ by default.
+                */
+               if (!kinfo->req_rss_size)
+                       max_rss_size = min_t(u16, max_rss_size,
+                                            (hdev->num_nic_msi - 1) /
+                                            kinfo->num_tc);
+
                /* Set to the maximum specification value (max_rss_size). */
-               dev_info(&hdev->pdev->dev, "rss changes from %d to %d\n",
-                        kinfo->rss_size, max_rss_size);
                kinfo->rss_size = max_rss_size;
        }
 
index e3090b3dab1da412390a87cb4d417fb025489f72..7d7e712691b9216bac17925f37544f1d919bdab5 100644 (file)
@@ -411,6 +411,13 @@ static int hclgevf_knic_setup(struct hclgevf_dev *hdev)
                kinfo->tqp[i] = &hdev->htqp[i].q;
        }
 
+       /* After initializing the maximum rss_size and tqps, adjust the default
+        * tqp number and rss size to match the actual vector count.
+        */
+       kinfo->num_tqps = min_t(u16, hdev->num_nic_msix - 1, kinfo->num_tqps);
+       kinfo->rss_size = min_t(u16, kinfo->num_tqps / kinfo->num_tc,
+                               kinfo->rss_size);
+
        return 0;
 }
 
@@ -502,6 +509,7 @@ static int hclgevf_get_vector(struct hnae3_handle *handle, u16 vector_num,
        int alloc = 0;
        int i, j;
 
+       vector_num = min_t(u16, hdev->num_nic_msix - 1, vector_num);
        vector_num = min(hdev->num_msi_left, vector_num);
 
        for (j = 0; j < vector_num; j++) {
@@ -2246,13 +2254,14 @@ static int hclgevf_init_msi(struct hclgevf_dev *hdev)
        int vectors;
        int i;
 
-       if (hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B))
+       if (hnae3_dev_roce_supported(hdev))
                vectors = pci_alloc_irq_vectors(pdev,
                                                hdev->roce_base_msix_offset + 1,
                                                hdev->num_msi,
                                                PCI_IRQ_MSIX);
        else
-               vectors = pci_alloc_irq_vectors(pdev, 1, hdev->num_msi,
+               vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
+                                               hdev->num_msi,
                                                PCI_IRQ_MSI | PCI_IRQ_MSIX);
 
        if (vectors < 0) {
@@ -2268,6 +2277,7 @@ static int hclgevf_init_msi(struct hclgevf_dev *hdev)
 
        hdev->num_msi = vectors;
        hdev->num_msi_left = vectors;
+
        hdev->base_msi_vector = pdev->irq;
        hdev->roce_base_vector = pdev->irq + hdev->roce_base_msix_offset;
 
@@ -2533,7 +2543,7 @@ static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev)
 
        req = (struct hclgevf_query_res_cmd *)desc.data;
 
-       if (hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B)) {
+       if (hnae3_dev_roce_supported(hdev)) {
                hdev->roce_base_msix_offset =
                hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
                                HCLGEVF_MSIX_OFT_ROCEE_M,
@@ -2542,6 +2552,9 @@ static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev)
                hnae3_get_field(__le16_to_cpu(req->vf_intr_vector_number),
                                HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);
 
+               /* The NIC's MSI-X vector count always equals the RoCE's. */
+               hdev->num_nic_msix = hdev->num_roce_msix;
+
                /* VF should have NIC vectors and Roce vectors, NIC vectors
                 * are queued before Roce vectors. The offset is fixed to 64.
                 */
@@ -2551,6 +2564,15 @@ static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev)
                hdev->num_msi =
                hnae3_get_field(__le16_to_cpu(req->vf_intr_vector_number),
                                HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);
+
+               hdev->num_nic_msix = hdev->num_msi;
+       }
+
+       if (hdev->num_nic_msix < HNAE3_MIN_VECTOR_NUM) {
+               dev_err(&hdev->pdev->dev,
+                       "Just %u msi resources, not enough for vf(min:2).\n",
+                       hdev->num_nic_msix);
+               return -EINVAL;
        }
 
        return 0;
index bdde3afc286b24996a8fc999a661269011d60047..2b8d6bc6d2245fede21d72c46329ffb275019202 100644 (file)
@@ -270,6 +270,7 @@ struct hclgevf_dev {
        u16 num_msi;
        u16 num_msi_left;
        u16 num_msi_used;
+       u16 num_nic_msix;       /* Num of nic vectors for this VF */
        u16 num_roce_msix;      /* Num of roce vectors for this VF */
        u16 roce_base_msix_offset;
        int roce_base_vector;
index 3e863a71c513678342017fc4f5636edd80e7de0e..7df5d7d211d47533055f17e9d50a2384cfd5e28f 100644 (file)
@@ -148,11 +148,15 @@ static int mdio_sc_cfg_reg_write(struct hns_mdio_device *mdio_dev,
 {
        u32 time_cnt;
        u32 reg_value;
+       int ret;
 
        regmap_write(mdio_dev->subctrl_vbase, cfg_reg, set_val);
 
        for (time_cnt = MDIO_TIMEOUT; time_cnt; time_cnt--) {
-               regmap_read(mdio_dev->subctrl_vbase, st_reg, &reg_value);
+               ret = regmap_read(mdio_dev->subctrl_vbase, st_reg, &reg_value);
+               if (ret)
+                       return ret;
+
                reg_value &= st_msk;
                if ((!!check_st) == (!!reg_value))
                        break;
index 211c5f74b4c86682e817c7aee4748284b600594f..aec7e98bcc853a796e2f107768726907d70f9cec 100644 (file)
@@ -96,6 +96,8 @@
 
 #define OPT_SWAP_PORT  0x0001  /* Need to wordswp on the MPU port */
 
+#define LIB82596_DMA_ATTR      DMA_ATTR_NON_CONSISTENT
+
 #define DMA_WBACK(ndev, addr, len) \
        do { dma_cache_sync((ndev)->dev.parent, (void *)addr, len, DMA_TO_DEVICE); } while (0)
 
@@ -200,7 +202,7 @@ static int __exit lan_remove_chip(struct parisc_device *pdev)
 
        unregister_netdev (dev);
        dma_free_attrs(&pdev->dev, sizeof(struct i596_private), lp->dma,
-                      lp->dma_addr, DMA_ATTR_NON_CONSISTENT);
+                      lp->dma_addr, LIB82596_DMA_ATTR);
        free_netdev (dev);
        return 0;
 }
index 1274ad24d6af19dca829fcb4b241b0bb5fb17e0f..f9742af7f142d97a111318d7ca10cdf120ce2c72 100644 (file)
@@ -1065,7 +1065,7 @@ static int i82596_probe(struct net_device *dev)
 
        dma = dma_alloc_attrs(dev->dev.parent, sizeof(struct i596_dma),
                              &lp->dma_addr, GFP_KERNEL,
-                             DMA_ATTR_NON_CONSISTENT);
+                             LIB82596_DMA_ATTR);
        if (!dma) {
                printk(KERN_ERR "%s: Couldn't get shared memory\n", __FILE__);
                return -ENOMEM;
@@ -1087,7 +1087,7 @@ static int i82596_probe(struct net_device *dev)
        i = register_netdev(dev);
        if (i) {
                dma_free_attrs(dev->dev.parent, sizeof(struct i596_dma),
-                              dma, lp->dma_addr, DMA_ATTR_NON_CONSISTENT);
+                              dma, lp->dma_addr, LIB82596_DMA_ATTR);
                return i;
        }
 
index 6eb6c2ff7f099230c0416d09e4bb715c8628170b..6436a98c5953fcd92ad37e1436e784b53b39c709 100644 (file)
@@ -24,6 +24,8 @@
 
 static const char sni_82596_string[] = "snirm_82596";
 
+#define LIB82596_DMA_ATTR      0
+
 #define DMA_WBACK(priv, addr, len)     do { } while (0)
 #define DMA_INV(priv, addr, len)       do { } while (0)
 #define DMA_WBACK_INV(priv, addr, len) do { } while (0)
@@ -152,7 +154,7 @@ static int sni_82596_driver_remove(struct platform_device *pdev)
 
        unregister_netdev(dev);
        dma_free_attrs(dev->dev.parent, sizeof(struct i596_private), lp->dma,
-                      lp->dma_addr, DMA_ATTR_NON_CONSISTENT);
+                      lp->dma_addr, LIB82596_DMA_ATTR);
        iounmap(lp->ca);
        iounmap(lp->mpu_port);
        free_netdev (dev);
index 2b073a3c0b8474370e1ac072548bd68d0e4f7471..f59d9a8e35e2e7343cd31f017dc3e8fd5f958b96 100644 (file)
@@ -2878,12 +2878,10 @@ static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
 
        if (test_bit(0, &adapter->resetting) &&
            adapter->reset_reason == VNIC_RESET_MOBILITY) {
-               u64 val = (0xff000000) | scrq->hw_irq;
+               struct irq_desc *desc = irq_to_desc(scrq->irq);
+               struct irq_chip *chip = irq_desc_get_chip(desc);
 
-               rc = plpar_hcall_norets(H_EOI, val);
-               if (rc)
-                       dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n",
-                               val, rc);
+               chip->irq_eoi(&desc->irq_data);
        }
 
        rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
index 71d3d8854d8f124293399e5ac23aae6f328593fb..be56e631d693884a40568effdda9b2aa1bf2696d 100644 (file)
@@ -607,6 +607,7 @@ static int e1000_set_ringparam(struct net_device *netdev,
        for (i = 0; i < adapter->num_rx_queues; i++)
                rxdr[i].count = rxdr->count;
 
+       err = 0;
        if (netif_running(adapter->netdev)) {
                /* Try to get new resources before deleting old */
                err = e1000_setup_all_rx_resources(adapter);
@@ -627,14 +628,13 @@ static int e1000_set_ringparam(struct net_device *netdev,
                adapter->rx_ring = rxdr;
                adapter->tx_ring = txdr;
                err = e1000_up(adapter);
-               if (err)
-                       goto err_setup;
        }
        kfree(tx_old);
        kfree(rx_old);
 
        clear_bit(__E1000_RESETTING, &adapter->flags);
-       return 0;
+       return err;
+
 err_setup_tx:
        e1000_free_all_rx_resources(adapter);
 err_setup_rx:
@@ -646,7 +646,6 @@ err_alloc_rx:
 err_alloc_tx:
        if (netif_running(adapter->netdev))
                e1000_up(adapter);
-err_setup:
        clear_bit(__E1000_RESETTING, &adapter->flags);
        return err;
 }
index b1c3227ae4ab8b923af7ef270403a37a47e35a69..a05dfecdd9b4bda601667455255fd84b50e2b73a 100644 (file)
@@ -157,11 +157,6 @@ static int i40e_xsk_umem_disable(struct i40e_vsi *vsi, u16 qid)
                err = i40e_queue_pair_enable(vsi, qid);
                if (err)
                        return err;
-
-               /* Kick start the NAPI context so that receiving will start */
-               err = i40e_xsk_wakeup(vsi->netdev, qid, XDP_WAKEUP_RX);
-               if (err)
-                       return err;
        }
 
        return 0;
index 3ec2ce0725d536f2a2110f551e7f80371aaaf798..8a6ef351412921c729db50e686201b51ff4795ec 100644 (file)
@@ -466,7 +466,7 @@ static s32 igb_init_mac_params_82575(struct e1000_hw *hw)
                        ? igb_setup_copper_link_82575
                        : igb_setup_serdes_link_82575;
 
-       if (mac->type == e1000_82580) {
+       if (mac->type == e1000_82580 || mac->type == e1000_i350) {
                switch (hw->device_id) {
                /* feature not supported on these id's */
                case E1000_DEV_ID_DH89XXCC_SGMII:
index 105b0624081a12bb6d375247856aacfa43bb6bd6..9148c62d9ac5ec03e10bd308bcc331663ea5729d 100644 (file)
@@ -753,7 +753,8 @@ u32 igb_rd32(struct e1000_hw *hw, u32 reg)
                struct net_device *netdev = igb->netdev;
                hw->hw_addr = NULL;
                netdev_err(netdev, "PCIe link lost\n");
-               WARN(1, "igb: Failed to read reg 0x%x!\n", reg);
+               WARN(pci_device_is_present(igb->pdev),
+                    "igb: Failed to read reg 0x%x!\n", reg);
        }
 
        return value;
@@ -2064,7 +2065,8 @@ static void igb_check_swap_media(struct igb_adapter *adapter)
        if ((hw->phy.media_type == e1000_media_type_copper) &&
            (!(connsw & E1000_CONNSW_AUTOSENSE_EN))) {
                swap_now = true;
-       } else if (!(connsw & E1000_CONNSW_SERDESD)) {
+       } else if ((hw->phy.media_type != e1000_media_type_copper) &&
+                  !(connsw & E1000_CONNSW_SERDESD)) {
                /* copper signal takes time to appear */
                if (adapter->copper_tries < 4) {
                        adapter->copper_tries++;
@@ -2370,7 +2372,7 @@ void igb_reset(struct igb_adapter *adapter)
                adapter->ei.get_invariants(hw);
                adapter->flags &= ~IGB_FLAG_MEDIA_RESET;
        }
-       if ((mac->type == e1000_82575) &&
+       if ((mac->type == e1000_82575 || mac->type == e1000_i350) &&
            (adapter->flags & IGB_FLAG_MAS_ENABLE)) {
                igb_enable_mas(adapter);
        }
index 63b62d74f9610c744fbd7f6c0a1e2e768291dc19..8e424dfab12ea9eb20a88c35504923fc89a06f6a 100644 (file)
@@ -4047,7 +4047,8 @@ u32 igc_rd32(struct igc_hw *hw, u32 reg)
                hw->hw_addr = NULL;
                netif_device_detach(netdev);
                netdev_err(netdev, "PCIe link lost, device now detached\n");
-               WARN(1, "igc: Failed to read reg 0x%x!\n", reg);
+               WARN(pci_device_is_present(igc->pdev),
+                    "igc: Failed to read reg 0x%x!\n", reg);
        }
 
        return value;
index 1ce2397306b9261826a736396ff5600fb6a115ec..91b3780ddb040de656e46a234f5c58e9b4fd71f2 100644 (file)
@@ -4310,7 +4310,6 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
                if (test_bit(__IXGBE_RX_FCOE, &rx_ring->state))
                        set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
 
-               clear_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &rx_ring->state);
                if (adapter->flags2 & IXGBE_FLAG2_RX_LEGACY)
                        continue;
 
index c8425d35c049bc8cf232eee65731ea0b166227cc..e47783ce77e08de2b935a983dc0c6bf782e0d5d9 100644 (file)
@@ -160,16 +160,23 @@ static inline u32 mvneta_bm_pool_get_bp(struct mvneta_bm *priv,
                             (bm_pool->id << MVNETA_BM_POOL_ACCESS_OFFS));
 }
 #else
-void mvneta_bm_pool_destroy(struct mvneta_bm *priv,
-                           struct mvneta_bm_pool *bm_pool, u8 port_map) {}
-void mvneta_bm_bufs_free(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool,
-                        u8 port_map) {}
-int mvneta_bm_construct(struct hwbm_pool *hwbm_pool, void *buf) { return 0; }
-int mvneta_bm_pool_refill(struct mvneta_bm *priv,
-                         struct mvneta_bm_pool *bm_pool) {return 0; }
-struct mvneta_bm_pool *mvneta_bm_pool_use(struct mvneta_bm *priv, u8 pool_id,
-                                         enum mvneta_bm_type type, u8 port_id,
-                                         int pkt_size) { return NULL; }
+static inline void mvneta_bm_pool_destroy(struct mvneta_bm *priv,
+                                         struct mvneta_bm_pool *bm_pool,
+                                         u8 port_map) {}
+static inline void mvneta_bm_bufs_free(struct mvneta_bm *priv,
+                                      struct mvneta_bm_pool *bm_pool,
+                                      u8 port_map) {}
+static inline int mvneta_bm_construct(struct hwbm_pool *hwbm_pool, void *buf)
+{ return 0; }
+static inline int mvneta_bm_pool_refill(struct mvneta_bm *priv,
+                                       struct mvneta_bm_pool *bm_pool)
+{ return 0; }
+static inline struct mvneta_bm_pool *mvneta_bm_pool_use(struct mvneta_bm *priv,
+                                                       u8 pool_id,
+                                                       enum mvneta_bm_type type,
+                                                       u8 port_id,
+                                                       int pkt_size)
+{ return NULL; }
 
 static inline void mvneta_bm_pool_put_bp(struct mvneta_bm *priv,
                                         struct mvneta_bm_pool *bm_pool,
@@ -178,7 +185,8 @@ static inline void mvneta_bm_pool_put_bp(struct mvneta_bm *priv,
 static inline u32 mvneta_bm_pool_get_bp(struct mvneta_bm *priv,
                                        struct mvneta_bm_pool *bm_pool)
 { return 0; }
-struct mvneta_bm *mvneta_bm_get(struct device_node *node) { return NULL; }
-void mvneta_bm_put(struct mvneta_bm *priv) {}
+static inline struct mvneta_bm *mvneta_bm_get(struct device_node *node)
+{ return NULL; }
+static inline void mvneta_bm_put(struct mvneta_bm *priv) {}
 #endif /* CONFIG_MVNETA_BM */
 #endif
index c61069340f4f23a26b825ab2a304e69fdbaddbba..703adb96429e24abe453e681f6f815ba1fb526de 100644 (file)
@@ -261,6 +261,7 @@ static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
                ge_mode = 0;
                switch (state->interface) {
                case PHY_INTERFACE_MODE_MII:
+               case PHY_INTERFACE_MODE_GMII:
                        ge_mode = 1;
                        break;
                case PHY_INTERFACE_MODE_REVMII:
index 4356f3a580027602b84f7d550b3b56244e757307..1187ef1375e298bc1ae309a72064308f6e382c34 100644 (file)
@@ -471,12 +471,31 @@ void mlx4_init_quotas(struct mlx4_dev *dev)
                priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[pf];
 }
 
-static int get_max_gauranteed_vfs_counter(struct mlx4_dev *dev)
+static int
+mlx4_calc_res_counter_guaranteed(struct mlx4_dev *dev,
+                                struct resource_allocator *res_alloc,
+                                int vf)
 {
-       /* reduce the sink counter */
-       return (dev->caps.max_counters - 1 -
-               (MLX4_PF_COUNTERS_PER_PORT * MLX4_MAX_PORTS))
-               / MLX4_MAX_PORTS;
+       struct mlx4_active_ports actv_ports;
+       int ports, counters_guaranteed;
+
+       /* For master, only allocate according to the number of phys ports */
+       if (vf == mlx4_master_func_num(dev))
+               return MLX4_PF_COUNTERS_PER_PORT * dev->caps.num_ports;
+
+       /* calculate real number of ports for the VF */
+       actv_ports = mlx4_get_active_ports(dev, vf);
+       ports = bitmap_weight(actv_ports.ports, dev->caps.num_ports);
+       counters_guaranteed = ports * MLX4_VF_COUNTERS_PER_PORT;
+
+       /* If we do not have enough counters for this VF, do not
+        * allocate any for it. The '-1' accounts for the sink counter.
+        */
+       if ((res_alloc->res_reserved + counters_guaranteed) >
+           (dev->caps.max_counters - 1))
+               return 0;
+
+       return counters_guaranteed;
 }
 
 int mlx4_init_resource_tracker(struct mlx4_dev *dev)
@@ -484,7 +503,6 @@ int mlx4_init_resource_tracker(struct mlx4_dev *dev)
        struct mlx4_priv *priv = mlx4_priv(dev);
        int i, j;
        int t;
-       int max_vfs_guarantee_counter = get_max_gauranteed_vfs_counter(dev);
 
        priv->mfunc.master.res_tracker.slave_list =
                kcalloc(dev->num_slaves, sizeof(struct slave_list),
@@ -603,16 +621,8 @@ int mlx4_init_resource_tracker(struct mlx4_dev *dev)
                                break;
                        case RES_COUNTER:
                                res_alloc->quota[t] = dev->caps.max_counters;
-                               if (t == mlx4_master_func_num(dev))
-                                       res_alloc->guaranteed[t] =
-                                               MLX4_PF_COUNTERS_PER_PORT *
-                                               MLX4_MAX_PORTS;
-                               else if (t <= max_vfs_guarantee_counter)
-                                       res_alloc->guaranteed[t] =
-                                               MLX4_VF_COUNTERS_PER_PORT *
-                                               MLX4_MAX_PORTS;
-                               else
-                                       res_alloc->guaranteed[t] = 0;
+                               res_alloc->guaranteed[t] =
+                                       mlx4_calc_res_counter_guaranteed(dev, res_alloc, t);
                                break;
                        default:
                                break;
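
mlx4_calc_res_counter_guaranteed() above grants each function counters according to its actual number of ports and stops handing out guarantees once the pool, minus the sink counter, is spent. A standalone walk-through with invented capacities (not the mlx4 data structures):

#include <stdio.h>

#define PF_COUNTERS_PER_PORT 2
#define VF_COUNTERS_PER_PORT 1

/* Illustrative re-implementation of the guarantee logic, with made-up
 * numbers; the caller accumulates the reservations. */
static int guaranteed(int func, int master, int ports, int max_counters,
		      int reserved)
{
	int want;

	/* the PF is always guaranteed counters for its physical ports */
	if (func == master)
		return PF_COUNTERS_PER_PORT * ports;

	want = VF_COUNTERS_PER_PORT * ports;

	/* '-1' keeps the sink counter out of the guaranteed pool; if the
	 * remaining budget cannot cover this VF, guarantee it nothing. */
	if (reserved + want > max_counters - 1)
		return 0;

	return want;
}

int main(void)
{
	int max_counters = 8, reserved = 0, f, g;

	for (f = 0; f < 6; f++) {	/* function 0 is the PF/master */
		g = guaranteed(f, 0, 2, max_counters, reserved);
		reserved += g;
		printf("func %d guaranteed %d (reserved %d)\n", f, g, reserved);
	}
	return 0;
}
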
index 8d76452cacdc0fa22135f5f42bb1f003ec7467c3..f1a7bc46f1c0c67a886e0a9ecb06f628b3bc505f 100644 (file)
@@ -345,7 +345,7 @@ struct mlx5e_tx_wqe_info {
        u8  num_wqebbs;
        u8  num_dma;
 #ifdef CONFIG_MLX5_EN_TLS
-       skb_frag_t *resync_dump_frag;
+       struct page *resync_dump_frag_page;
 #endif
 };
 
@@ -410,6 +410,7 @@ struct mlx5e_txqsq {
        struct device             *pdev;
        __be32                     mkey_be;
        unsigned long              state;
+       unsigned int               hw_mtu;
        struct hwtstamp_config    *tstamp;
        struct mlx5_clock         *clock;
 
index b3a249b2a482fbd65ee6e857c572d66727d4ad45..ac44bbe95c5c1b653366252662fddb54e92188fa 100644 (file)
@@ -141,7 +141,7 @@ int mlx5e_hv_vhca_stats_create(struct mlx5e_priv *priv)
                                    "Failed to create hv vhca stats agent, err = %ld\n",
                                    PTR_ERR(agent));
 
-               kfree(priv->stats_agent.buf);
+               kvfree(priv->stats_agent.buf);
                return IS_ERR_OR_NULL(agent);
        }
 
@@ -157,5 +157,5 @@ void mlx5e_hv_vhca_stats_destroy(struct mlx5e_priv *priv)
                return;
 
        mlx5_hv_vhca_agent_destroy(priv->stats_agent.agent);
-       kfree(priv->stats_agent.buf);
+       kvfree(priv->stats_agent.buf);
 }
index f8ee18b4da6fa92818f515a30de826d183558475..13af72556987ffe6ffd5dbf2a32d240a1a545cc0 100644 (file)
@@ -97,15 +97,19 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
        if (ret)
                return ret;
 
-       if (mlx5_lag_is_multipath(mdev) && rt->rt_gw_family != AF_INET)
+       if (mlx5_lag_is_multipath(mdev) && rt->rt_gw_family != AF_INET) {
+               ip_rt_put(rt);
                return -ENETUNREACH;
+       }
 #else
        return -EOPNOTSUPP;
 #endif
 
        ret = get_route_and_out_devs(priv, rt->dst.dev, route_dev, out_dev);
-       if (ret < 0)
+       if (ret < 0) {
+               ip_rt_put(rt);
                return ret;
+       }
 
        if (!(*out_ttl))
                *out_ttl = ip4_dst_hoplimit(&rt->dst);
@@ -149,8 +153,10 @@ static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
                *out_ttl = ip6_dst_hoplimit(dst);
 
        ret = get_route_and_out_devs(priv, dst->dev, route_dev, out_dev);
-       if (ret < 0)
+       if (ret < 0) {
+               dst_release(dst);
                return ret;
+       }
 #else
        return -EOPNOTSUPP;
 #endif
index 87be9674790292beceddd8908e0d52d07a71c4e1..7c8796d9743fa5a6c66f4aa3e0017b800a121a9b 100644 (file)
 #else
 /* TLS offload requires additional stop_room for:
  *  - a resync SKB.
- * kTLS offload requires additional stop_room for:
- * - static params WQE,
- * - progress params WQE, and
- * - resync DUMP per frag.
+ * kTLS offload requires fixed additional stop_room for:
+ * - a static params WQE, and a progress params WQE.
+ * The additional MTU-dependent room for the resync DUMP WQEs
+ * will be calculated and added at runtime.
  */
 #define MLX5E_SQ_TLS_ROOM  \
        (MLX5_SEND_WQE_MAX_WQEBBS + \
-        MLX5E_KTLS_STATIC_WQEBBS + MLX5E_KTLS_PROGRESS_WQEBBS + \
-        MAX_SKB_FRAGS * MLX5E_KTLS_MAX_DUMP_WQEBBS)
+        MLX5E_KTLS_STATIC_WQEBBS + MLX5E_KTLS_PROGRESS_WQEBBS)
 #endif
 
 #define INL_HDR_START_SZ (sizeof(((struct mlx5_wqe_eth_seg *)NULL)->inline_hdr.start))
@@ -92,7 +91,7 @@ mlx5e_fill_sq_frag_edge(struct mlx5e_txqsq *sq, struct mlx5_wq_cyc *wq,
 
        /* fill sq frag edge with nops to avoid wqe wrapping two pages */
        for (; wi < edge_wi; wi++) {
-               wi->skb        = NULL;
+               memset(wi, 0, sizeof(*wi));
                wi->num_wqebbs = 1;
                mlx5e_post_nop(wq, sq->sqn, &sq->pc);
        }
index d2ff74d52720d2ba369c143f086922185117086e..46725cd743a369b6531b2dc3fd8051e2129f266e 100644 (file)
@@ -38,7 +38,7 @@ static int mlx5e_ktls_add(struct net_device *netdev, struct sock *sk,
                return -ENOMEM;
 
        tx_priv->expected_seq = start_offload_tcp_sn;
-       tx_priv->crypto_info  = crypto_info;
+       tx_priv->crypto_info  = *(struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
        mlx5e_set_ktls_tx_priv_ctx(tls_ctx, tx_priv);
 
        /* tc and underlay_qpn values are not in use for tls tis */
index b7298f9ee3d3f7d7a2adcb5cd2895d5ab2a0d78f..a3efa29a4629d796876452368502c3d10a9b12b5 100644 (file)
         MLX5_ST_SZ_BYTES(tls_progress_params))
 #define MLX5E_KTLS_PROGRESS_WQEBBS \
        (DIV_ROUND_UP(MLX5E_KTLS_PROGRESS_WQE_SZ, MLX5_SEND_WQE_BB))
-#define MLX5E_KTLS_MAX_DUMP_WQEBBS 2
+
+struct mlx5e_dump_wqe {
+       struct mlx5_wqe_ctrl_seg ctrl;
+       struct mlx5_wqe_data_seg data;
+};
+
+#define MLX5E_KTLS_DUMP_WQEBBS \
+       (DIV_ROUND_UP(sizeof(struct mlx5e_dump_wqe), MLX5_SEND_WQE_BB))
 
 enum {
        MLX5E_TLS_PROGRESS_PARAMS_AUTH_STATE_NO_OFFLOAD     = 0,
@@ -37,7 +44,7 @@ enum {
 
 struct mlx5e_ktls_offload_context_tx {
        struct tls_offload_context_tx *tx_ctx;
-       struct tls_crypto_info *crypto_info;
+       struct tls12_crypto_info_aes_gcm_128 crypto_info;
        u32 expected_seq;
        u32 tisn;
        u32 key_id;
@@ -86,14 +93,28 @@ struct sk_buff *mlx5e_ktls_handle_tx_skb(struct net_device *netdev,
                                         struct mlx5e_tx_wqe **wqe, u16 *pi);
 void mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
                                           struct mlx5e_tx_wqe_info *wi,
-                                          struct mlx5e_sq_dma *dma);
-
+                                          u32 *dma_fifo_cc);
+static inline u8
+mlx5e_ktls_dumps_num_wqebbs(struct mlx5e_txqsq *sq, unsigned int nfrags,
+                           unsigned int sync_len)
+{
+       /* Given the MTU and sync_len, calculates an upper bound for the
+        * number of WQEBBs needed for the TX resync DUMP WQEs of a record.
+        */
+       return MLX5E_KTLS_DUMP_WQEBBS *
+               (nfrags + DIV_ROUND_UP(sync_len, sq->hw_mtu));
+}
 #else
 
 static inline void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv)
 {
 }
 
+static inline void
+mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
+                                     struct mlx5e_tx_wqe_info *wi,
+                                     u32 *dma_fifo_cc) {}
+
 #endif
 
 #endif /* __MLX5E_TLS_H__ */
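
mlx5e_ktls_dumps_num_wqebbs() above bounds the DUMP WQE space needed for one resync by the number of frags plus the number of MTU-sized chunks in sync_len, since each frag may add one extra chunk when it is split on MTU boundaries. A worked example with invented sizes:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	/* Invented sizes purely for illustration. */
	unsigned int dump_wqebbs = 1;	 /* WQEBBs per DUMP WQE (ctrl + data) */
	unsigned int nfrags      = 3;	 /* frags holding the resync data     */
	unsigned int sync_len    = 4000; /* bytes that must be re-DUMPed      */
	unsigned int hw_mtu      = 1500; /* each DUMP is split to MTU chunks  */
	unsigned int wqebbs;

	wqebbs = dump_wqebbs * (nfrags + DIV_ROUND_UP(sync_len, hw_mtu));
	printf("stop_room contribution: %u WQEBBs\n", wqebbs);	/* 1 * (3 + 3) = 6 */
	return 0;
}
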
index d195366461c9f21df74ad755d8be36efd7708f2c..778dab1af8fc659cae40ead517bc94dc22f68a4a 100644 (file)
@@ -24,17 +24,12 @@ enum {
 static void
 fill_static_params_ctx(void *ctx, struct mlx5e_ktls_offload_context_tx *priv_tx)
 {
-       struct tls_crypto_info *crypto_info = priv_tx->crypto_info;
-       struct tls12_crypto_info_aes_gcm_128 *info;
+       struct tls12_crypto_info_aes_gcm_128 *info = &priv_tx->crypto_info;
        char *initial_rn, *gcm_iv;
        u16 salt_sz, rec_seq_sz;
        char *salt, *rec_seq;
        u8 tls_version;
 
-       if (WARN_ON(crypto_info->cipher_type != TLS_CIPHER_AES_GCM_128))
-               return;
-
-       info = (struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
        EXTRACT_INFO_FIELDS;
 
        gcm_iv      = MLX5_ADDR_OF(tls_static_params, ctx, gcm_iv);
@@ -108,16 +103,15 @@ build_progress_params(struct mlx5e_tx_wqe *wqe, u16 pc, u32 sqn,
 }
 
 static void tx_fill_wi(struct mlx5e_txqsq *sq,
-                      u16 pi, u8 num_wqebbs,
-                      skb_frag_t *resync_dump_frag,
-                      u32 num_bytes)
+                      u16 pi, u8 num_wqebbs, u32 num_bytes,
+                      struct page *page)
 {
        struct mlx5e_tx_wqe_info *wi = &sq->db.wqe_info[pi];
 
-       wi->skb              = NULL;
-       wi->num_wqebbs       = num_wqebbs;
-       wi->resync_dump_frag = resync_dump_frag;
-       wi->num_bytes        = num_bytes;
+       memset(wi, 0, sizeof(*wi));
+       wi->num_wqebbs = num_wqebbs;
+       wi->num_bytes  = num_bytes;
+       wi->resync_dump_frag_page = page;
 }
 
 void mlx5e_ktls_tx_offload_set_pending(struct mlx5e_ktls_offload_context_tx *priv_tx)
@@ -145,7 +139,7 @@ post_static_params(struct mlx5e_txqsq *sq,
 
        umr_wqe = mlx5e_sq_fetch_wqe(sq, MLX5E_KTLS_STATIC_UMR_WQE_SZ, &pi);
        build_static_params(umr_wqe, sq->pc, sq->sqn, priv_tx, fence);
-       tx_fill_wi(sq, pi, MLX5E_KTLS_STATIC_WQEBBS, NULL, 0);
+       tx_fill_wi(sq, pi, MLX5E_KTLS_STATIC_WQEBBS, 0, NULL);
        sq->pc += MLX5E_KTLS_STATIC_WQEBBS;
 }
 
@@ -159,7 +153,7 @@ post_progress_params(struct mlx5e_txqsq *sq,
 
        wqe = mlx5e_sq_fetch_wqe(sq, MLX5E_KTLS_PROGRESS_WQE_SZ, &pi);
        build_progress_params(wqe, sq->pc, sq->sqn, priv_tx, fence);
-       tx_fill_wi(sq, pi, MLX5E_KTLS_PROGRESS_WQEBBS, NULL, 0);
+       tx_fill_wi(sq, pi, MLX5E_KTLS_PROGRESS_WQEBBS, 0, NULL);
        sq->pc += MLX5E_KTLS_PROGRESS_WQEBBS;
 }
 
@@ -169,6 +163,14 @@ mlx5e_ktls_tx_post_param_wqes(struct mlx5e_txqsq *sq,
                              bool skip_static_post, bool fence_first_post)
 {
        bool progress_fence = skip_static_post || !fence_first_post;
+       struct mlx5_wq_cyc *wq = &sq->wq;
+       u16 contig_wqebbs_room, pi;
+
+       pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
+       contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
+       if (unlikely(contig_wqebbs_room <
+                    MLX5E_KTLS_STATIC_WQEBBS + MLX5E_KTLS_PROGRESS_WQEBBS))
+               mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room);
 
        if (!skip_static_post)
                post_static_params(sq, priv_tx, fence_first_post);
@@ -180,29 +182,36 @@ struct tx_sync_info {
        u64 rcd_sn;
        s32 sync_len;
        int nr_frags;
-       skb_frag_t *frags[MAX_SKB_FRAGS];
+       skb_frag_t frags[MAX_SKB_FRAGS];
+};
+
+enum mlx5e_ktls_sync_retval {
+       MLX5E_KTLS_SYNC_DONE,
+       MLX5E_KTLS_SYNC_FAIL,
+       MLX5E_KTLS_SYNC_SKIP_NO_DATA,
 };
 
-static bool tx_sync_info_get(struct mlx5e_ktls_offload_context_tx *priv_tx,
-                            u32 tcp_seq, struct tx_sync_info *info)
+static enum mlx5e_ktls_sync_retval
+tx_sync_info_get(struct mlx5e_ktls_offload_context_tx *priv_tx,
+                u32 tcp_seq, struct tx_sync_info *info)
 {
        struct tls_offload_context_tx *tx_ctx = priv_tx->tx_ctx;
+       enum mlx5e_ktls_sync_retval ret = MLX5E_KTLS_SYNC_DONE;
        struct tls_record_info *record;
        int remaining, i = 0;
        unsigned long flags;
-       bool ret = true;
 
        spin_lock_irqsave(&tx_ctx->lock, flags);
        record = tls_get_record(tx_ctx, tcp_seq, &info->rcd_sn);
 
        if (unlikely(!record)) {
-               ret = false;
+               ret = MLX5E_KTLS_SYNC_FAIL;
                goto out;
        }
 
        if (unlikely(tcp_seq < tls_record_start_seq(record))) {
-               if (!tls_record_is_start_marker(record))
-                       ret = false;
+               ret = tls_record_is_start_marker(record) ?
+                       MLX5E_KTLS_SYNC_SKIP_NO_DATA : MLX5E_KTLS_SYNC_FAIL;
                goto out;
        }
 
@@ -211,13 +220,13 @@ static bool tx_sync_info_get(struct mlx5e_ktls_offload_context_tx *priv_tx,
        while (remaining > 0) {
                skb_frag_t *frag = &record->frags[i];
 
-               __skb_frag_ref(frag);
+               get_page(skb_frag_page(frag));
                remaining -= skb_frag_size(frag);
-               info->frags[i++] = frag;
+               info->frags[i++] = *frag;
        }
        /* reduce the part which will be sent with the original SKB */
        if (remaining < 0)
-               skb_frag_size_add(info->frags[i - 1], remaining);
+               skb_frag_size_add(&info->frags[i - 1], remaining);
        info->nr_frags = i;
 out:
        spin_unlock_irqrestore(&tx_ctx->lock, flags);
@@ -229,17 +238,12 @@ tx_post_resync_params(struct mlx5e_txqsq *sq,
                      struct mlx5e_ktls_offload_context_tx *priv_tx,
                      u64 rcd_sn)
 {
-       struct tls_crypto_info *crypto_info = priv_tx->crypto_info;
-       struct tls12_crypto_info_aes_gcm_128 *info;
+       struct tls12_crypto_info_aes_gcm_128 *info = &priv_tx->crypto_info;
        __be64 rn_be = cpu_to_be64(rcd_sn);
        bool skip_static_post;
        u16 rec_seq_sz;
        char *rec_seq;
 
-       if (WARN_ON(crypto_info->cipher_type != TLS_CIPHER_AES_GCM_128))
-               return;
-
-       info = (struct tls12_crypto_info_aes_gcm_128 *)crypto_info;
        rec_seq = info->rec_seq;
        rec_seq_sz = sizeof(info->rec_seq);
 
@@ -250,11 +254,6 @@ tx_post_resync_params(struct mlx5e_txqsq *sq,
        mlx5e_ktls_tx_post_param_wqes(sq, priv_tx, skip_static_post, true);
 }
 
-struct mlx5e_dump_wqe {
-       struct mlx5_wqe_ctrl_seg ctrl;
-       struct mlx5_wqe_data_seg data;
-};
-
 static int
 tx_post_resync_dump(struct mlx5e_txqsq *sq, skb_frag_t *frag, u32 tisn, bool first)
 {
@@ -262,7 +261,6 @@ tx_post_resync_dump(struct mlx5e_txqsq *sq, skb_frag_t *frag, u32 tisn, bool fir
        struct mlx5_wqe_data_seg *dseg;
        struct mlx5e_dump_wqe *wqe;
        dma_addr_t dma_addr = 0;
-       u8  num_wqebbs;
        u16 ds_cnt;
        int fsz;
        u16 pi;
@@ -270,7 +268,6 @@ tx_post_resync_dump(struct mlx5e_txqsq *sq, skb_frag_t *frag, u32 tisn, bool fir
        wqe = mlx5e_sq_fetch_wqe(sq, sizeof(*wqe), &pi);
 
        ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
-       num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
 
        cseg = &wqe->ctrl;
        dseg = &wqe->data;
@@ -291,24 +288,27 @@ tx_post_resync_dump(struct mlx5e_txqsq *sq, skb_frag_t *frag, u32 tisn, bool fir
        dseg->byte_count = cpu_to_be32(fsz);
        mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE);
 
-       tx_fill_wi(sq, pi, num_wqebbs, frag, fsz);
-       sq->pc += num_wqebbs;
-
-       WARN(num_wqebbs > MLX5E_KTLS_MAX_DUMP_WQEBBS,
-            "unexpected DUMP num_wqebbs, %d > %d",
-            num_wqebbs, MLX5E_KTLS_MAX_DUMP_WQEBBS);
+       tx_fill_wi(sq, pi, MLX5E_KTLS_DUMP_WQEBBS, fsz, skb_frag_page(frag));
+       sq->pc += MLX5E_KTLS_DUMP_WQEBBS;
 
        return 0;
 }
 
 void mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
                                           struct mlx5e_tx_wqe_info *wi,
-                                          struct mlx5e_sq_dma *dma)
+                                          u32 *dma_fifo_cc)
 {
-       struct mlx5e_sq_stats *stats = sq->stats;
+       struct mlx5e_sq_stats *stats;
+       struct mlx5e_sq_dma *dma;
+
+       if (!wi->resync_dump_frag_page)
+               return;
+
+       dma = mlx5e_dma_get(sq, (*dma_fifo_cc)++);
+       stats = sq->stats;
 
        mlx5e_tx_dma_unmap(sq->pdev, dma);
-       __skb_frag_unref(wi->resync_dump_frag);
+       put_page(wi->resync_dump_frag_page);
        stats->tls_dump_packets++;
        stats->tls_dump_bytes += wi->num_bytes;
 }
@@ -318,25 +318,31 @@ static void tx_post_fence_nop(struct mlx5e_txqsq *sq)
        struct mlx5_wq_cyc *wq = &sq->wq;
        u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
 
-       tx_fill_wi(sq, pi, 1, NULL, 0);
+       tx_fill_wi(sq, pi, 1, 0, NULL);
 
        mlx5e_post_nop_fence(wq, sq->sqn, &sq->pc);
 }
 
-static struct sk_buff *
+static enum mlx5e_ktls_sync_retval
 mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
                         struct mlx5e_txqsq *sq,
-                        struct sk_buff *skb,
+                        int datalen,
                         u32 seq)
 {
        struct mlx5e_sq_stats *stats = sq->stats;
        struct mlx5_wq_cyc *wq = &sq->wq;
+       enum mlx5e_ktls_sync_retval ret;
        struct tx_sync_info info = {};
        u16 contig_wqebbs_room, pi;
        u8 num_wqebbs;
-       int i;
-
-       if (!tx_sync_info_get(priv_tx, seq, &info)) {
+       int i = 0;
+
+       ret = tx_sync_info_get(priv_tx, seq, &info);
+       if (unlikely(ret != MLX5E_KTLS_SYNC_DONE)) {
+               if (ret == MLX5E_KTLS_SYNC_SKIP_NO_DATA) {
+                       stats->tls_skip_no_sync_data++;
+                       return MLX5E_KTLS_SYNC_SKIP_NO_DATA;
+               }
                /* We might get here if a retransmission reaches the driver
                 * after the relevant record is acked.
                 * It should be safe to drop the packet in this case
@@ -346,13 +352,8 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
        }
 
        if (unlikely(info.sync_len < 0)) {
-               u32 payload;
-               int headln;
-
-               headln = skb_transport_offset(skb) + tcp_hdrlen(skb);
-               payload = skb->len - headln;
-               if (likely(payload <= -info.sync_len))
-                       return skb;
+               if (likely(datalen <= -info.sync_len))
+                       return MLX5E_KTLS_SYNC_DONE;
 
                stats->tls_drop_bypass_req++;
                goto err_out;
@@ -360,30 +361,62 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx,
 
        stats->tls_ooo++;
 
-       num_wqebbs = MLX5E_KTLS_STATIC_WQEBBS + MLX5E_KTLS_PROGRESS_WQEBBS +
-               (info.nr_frags ? info.nr_frags * MLX5E_KTLS_MAX_DUMP_WQEBBS : 1);
+       tx_post_resync_params(sq, priv_tx, info.rcd_sn);
+
+       /* If no dump WQE was sent, we need to have a fence NOP WQE before the
+        * actual data xmit.
+        */
+       if (!info.nr_frags) {
+               tx_post_fence_nop(sq);
+               return MLX5E_KTLS_SYNC_DONE;
+       }
+
+       num_wqebbs = mlx5e_ktls_dumps_num_wqebbs(sq, info.nr_frags, info.sync_len);
        pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
        contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
+
        if (unlikely(contig_wqebbs_room < num_wqebbs))
                mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room);
 
        tx_post_resync_params(sq, priv_tx, info.rcd_sn);
 
-       for (i = 0; i < info.nr_frags; i++)
-               if (tx_post_resync_dump(sq, info.frags[i], priv_tx->tisn, !i))
-                       goto err_out;
+       for (; i < info.nr_frags; i++) {
+               unsigned int orig_fsz, frag_offset = 0, n = 0;
+               skb_frag_t *f = &info.frags[i];
 
-       /* If no dump WQE was sent, we need to have a fence NOP WQE before the
-        * actual data xmit.
-        */
-       if (!info.nr_frags)
-               tx_post_fence_nop(sq);
+               orig_fsz = skb_frag_size(f);
 
-       return skb;
+               do {
+                       bool fence = !(i || frag_offset);
+                       unsigned int fsz;
+
+                       n++;
+                       fsz = min_t(unsigned int, sq->hw_mtu, orig_fsz - frag_offset);
+                       skb_frag_size_set(f, fsz);
+                       if (tx_post_resync_dump(sq, f, priv_tx->tisn, fence)) {
+                               page_ref_add(skb_frag_page(f), n - 1);
+                               goto err_out;
+                       }
+
+                       skb_frag_off_add(f, fsz);
+                       frag_offset += fsz;
+               } while (frag_offset < orig_fsz);
+
+               page_ref_add(skb_frag_page(f), n - 1);
+       }
+
+       return MLX5E_KTLS_SYNC_DONE;
 
 err_out:
-       dev_kfree_skb_any(skb);
-       return NULL;
+       for (; i < info.nr_frags; i++)
+               /* The put_page() here undoes the page ref obtained in tx_sync_info_get().
+                * Page refs obtained for the DUMP WQEs above (by page_ref_add) will be
+                * released only upon their completions (or in mlx5e_free_txqsq_descs,
+                * if channel closes).
+                */
+               put_page(skb_frag_page(&info.frags[i]));
+
+       return MLX5E_KTLS_SYNC_FAIL;
 }
 
 struct sk_buff *mlx5e_ktls_handle_tx_skb(struct net_device *netdev,
@@ -419,10 +452,15 @@ struct sk_buff *mlx5e_ktls_handle_tx_skb(struct net_device *netdev,
 
        seq = ntohl(tcp_hdr(skb)->seq);
        if (unlikely(priv_tx->expected_seq != seq)) {
-               skb = mlx5e_ktls_tx_handle_ooo(priv_tx, sq, skb, seq);
-               if (unlikely(!skb))
+               enum mlx5e_ktls_sync_retval ret =
+                       mlx5e_ktls_tx_handle_ooo(priv_tx, sq, datalen, seq);
+
+               if (likely(ret == MLX5E_KTLS_SYNC_DONE))
+                       *wqe = mlx5e_sq_fetch_wqe(sq, sizeof(**wqe), pi);
+               else if (ret == MLX5E_KTLS_SYNC_FAIL)
+                       goto err_out;
+               else /* ret == MLX5E_KTLS_SYNC_SKIP_NO_DATA */
                        goto out;
-               *wqe = mlx5e_sq_fetch_wqe(sq, sizeof(**wqe), pi);
        }
 
        priv_tx->expected_seq = seq + datalen;
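
The resync path above slices each page fragment into pieces no larger than the SQ hardware MTU, posts one DUMP WQE per piece, and takes one extra page reference for every piece after the first (those references are dropped as the DUMP WQEs complete). A rough standalone sketch of the chunking arithmetic, with a hypothetical emit() callback standing in for the DUMP-WQE post:

/* Split a fragment of frag_len bytes into pieces of at most hw_mtu bytes
 * and report how many pieces were emitted.
 */
static unsigned int split_frag(unsigned int frag_len, unsigned int hw_mtu,
			       void (*emit)(unsigned int off, unsigned int len))
{
	unsigned int off = 0, n = 0;

	while (off < frag_len) {
		unsigned int len = frag_len - off;

		if (len > hw_mtu)
			len = hw_mtu;
		emit(off, len);		/* one DUMP WQE per piece in the driver */
		off += len;
		n++;
	}
	return n;			/* driver then adds n - 1 extra page refs */
}
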
index c5a9c20d7f006f554afea951f14da6b06ed8ca57..327c93a7bd55a843dfb16bf27ccdacf61e69efc0 100644 (file)
@@ -1021,7 +1021,7 @@ static bool ext_link_mode_requested(const unsigned long *adver)
 {
 #define MLX5E_MIN_PTYS_EXT_LINK_MODE_BIT ETHTOOL_LINK_MODE_50000baseKR_Full_BIT
        int size = __ETHTOOL_LINK_MODE_MASK_NBITS - MLX5E_MIN_PTYS_EXT_LINK_MODE_BIT;
-       __ETHTOOL_DECLARE_LINK_MODE_MASK(modes);
+       __ETHTOOL_DECLARE_LINK_MODE_MASK(modes) = {0,};
 
        bitmap_set(modes, MLX5E_MIN_PTYS_EXT_LINK_MODE_BIT, size);
        return bitmap_intersects(modes, adver, __ETHTOOL_LINK_MODE_MASK_NBITS);
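
The one-line fix above matters because the link-mode mask lives on the stack: only the bits from MLX5E_MIN_PTYS_EXT_LINK_MODE_BIT upward are set explicitly, so without the "= {0,}" initializer the lower words hold stack garbage and bitmap_intersects() can match legacy modes by accident. A minimal sketch of the failure mode, assuming a hypothetical 64-bit mask for brevity:

	unsigned long modes[2];			/* on-stack: words start as garbage */

	bitmap_set(modes, 52, 12);		/* only bits 52..63 set on purpose */
	/* Bits 0..51 still hold stack garbage, so bitmap_intersects(modes, adver, 64)
	 * may return true for advertised legacy modes; "= { 0 }" avoids that.
	 */
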
index 7569287f8f3c2129a92f6f2f0552051dd3762571..772bfdbdeb9c601f93354a654f02163c6db3fc17 100644 (file)
@@ -1128,6 +1128,7 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
        sq->txq_ix    = txq_ix;
        sq->uar_map   = mdev->mlx5e_res.bfreg.map;
        sq->min_inline_mode = params->tx_min_inline_mode;
+       sq->hw_mtu    = MLX5E_SW2HW_MTU(params, params->sw_mtu);
        sq->stats     = &c->priv->channel_stats[c->ix].sq[tc];
        sq->stop_room = MLX5E_SQ_STOP_ROOM;
        INIT_WORK(&sq->recover_work, mlx5e_tx_err_cqe_work);
@@ -1135,10 +1136,14 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
                set_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state);
        if (MLX5_IPSEC_DEV(c->priv->mdev))
                set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state);
+#ifdef CONFIG_MLX5_EN_TLS
        if (mlx5_accel_is_tls_device(c->priv->mdev)) {
                set_bit(MLX5E_SQ_STATE_TLS, &sq->state);
-               sq->stop_room += MLX5E_SQ_TLS_ROOM;
+               sq->stop_room += MLX5E_SQ_TLS_ROOM +
+                       mlx5e_ktls_dumps_num_wqebbs(sq, MAX_SKB_FRAGS,
+                                                   TLS_MAX_PAYLOAD_SIZE);
        }
+#endif
 
        param->wq.db_numa_node = cpu_to_node(c->cpu);
        err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
@@ -1349,9 +1354,13 @@ static void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq)
        /* last doorbell out, godspeed .. */
        if (mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, 1)) {
                u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
+               struct mlx5e_tx_wqe_info *wi;
                struct mlx5e_tx_wqe *nop;
 
-               sq->db.wqe_info[pi].skb = NULL;
+               wi = &sq->db.wqe_info[pi];
+
+               memset(wi, 0, sizeof(*wi));
+               wi->num_wqebbs = 1;
                nop = mlx5e_post_nop(wq, sq->sqn, &sq->pc);
                mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &nop->ctrl);
        }
index 95892a3b63a13578e5532f63c96cadf0edaa434e..cd9bb7c7b3413651a9835925d69305299d007dcf 100644 (file)
@@ -611,8 +611,8 @@ static void mlx5e_rep_update_flows(struct mlx5e_priv *priv,
 
        mutex_lock(&esw->offloads.encap_tbl_lock);
        encap_connected = !!(e->flags & MLX5_ENCAP_ENTRY_VALID);
-       if (e->compl_result || (encap_connected == neigh_connected &&
-                               ether_addr_equal(e->h_dest, ha)))
+       if (e->compl_result < 0 || (encap_connected == neigh_connected &&
+                                   ether_addr_equal(e->h_dest, ha)))
                goto unlock;
 
        mlx5e_take_all_encap_flows(e, &flow_list);
index d6a547238de03fd9778d7f786346c00647963c68..82cffb3a99640f25fa6f6c0a8f52151194874058 100644 (file)
@@ -1386,8 +1386,11 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
        if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
                return 0;
 
-       if (rq->cqd.left)
+       if (rq->cqd.left) {
                work_done += mlx5e_decompress_cqes_cont(rq, cqwq, 0, budget);
+               if (rq->cqd.left || work_done >= budget)
+                       goto out;
+       }
 
        cqe = mlx5_cqwq_get_cqe(cqwq);
        if (!cqe) {
index 840ec945ccbab466863fb6f43a29bbfd23d08c4a..bbff8d8ded767ce411dca360fe43ef5a3942ef26 100644 (file)
@@ -35,6 +35,7 @@
 #include <linux/udp.h>
 #include <net/udp.h>
 #include "en.h"
+#include "en/port.h"
 
 enum {
        MLX5E_ST_LINK_STATE,
@@ -80,22 +81,12 @@ static int mlx5e_test_link_state(struct mlx5e_priv *priv)
 
 static int mlx5e_test_link_speed(struct mlx5e_priv *priv)
 {
-       u32 out[MLX5_ST_SZ_DW(ptys_reg)];
-       u32 eth_proto_oper;
-       int i;
+       u32 speed;
 
        if (!netif_carrier_ok(priv->netdev))
                return 1;
 
-       if (mlx5_query_port_ptys(priv->mdev, out, sizeof(out), MLX5_PTYS_EN, 1))
-               return 1;
-
-       eth_proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper);
-       for (i = 0; i < MLX5E_LINK_MODES_NUMBER; i++) {
-               if (eth_proto_oper & MLX5E_PROT_MASK(i))
-                       return 0;
-       }
-       return 1;
+       return mlx5e_port_linkspeed(priv->mdev, &speed);
 }
 
 struct mlx5ehdr {
index ac6fdcda7019b28c92c11bc1257705e23698e988..7e6ebd0505cc044963493e58563f45661b652441 100644 (file)
@@ -52,11 +52,12 @@ static const struct counter_desc sw_stats_desc[] = {
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_encrypted_bytes) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_ctx) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_ooo) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_packets) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_bytes) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_resync_bytes) },
+       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_skip_no_sync_data) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_drop_no_sync_data) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_drop_bypass_req) },
-       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_packets) },
-       { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tls_dump_bytes) },
 #endif
 
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_packets) },
@@ -288,11 +289,12 @@ static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
                        s->tx_tls_encrypted_bytes   += sq_stats->tls_encrypted_bytes;
                        s->tx_tls_ctx               += sq_stats->tls_ctx;
                        s->tx_tls_ooo               += sq_stats->tls_ooo;
+                       s->tx_tls_dump_bytes        += sq_stats->tls_dump_bytes;
+                       s->tx_tls_dump_packets      += sq_stats->tls_dump_packets;
                        s->tx_tls_resync_bytes      += sq_stats->tls_resync_bytes;
+                       s->tx_tls_skip_no_sync_data += sq_stats->tls_skip_no_sync_data;
                        s->tx_tls_drop_no_sync_data += sq_stats->tls_drop_no_sync_data;
                        s->tx_tls_drop_bypass_req   += sq_stats->tls_drop_bypass_req;
-                       s->tx_tls_dump_bytes        += sq_stats->tls_dump_bytes;
-                       s->tx_tls_dump_packets      += sq_stats->tls_dump_packets;
 #endif
                        s->tx_cqes              += sq_stats->cqes;
                }
@@ -1472,10 +1474,12 @@ static const struct counter_desc sq_stats_desc[] = {
        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_encrypted_bytes) },
        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_ctx) },
        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_ooo) },
-       { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_no_sync_data) },
-       { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_bypass_req) },
        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_dump_packets) },
        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_dump_bytes) },
+       { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_resync_bytes) },
+       { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_skip_no_sync_data) },
+       { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_no_sync_data) },
+       { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tls_drop_bypass_req) },
 #endif
        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_none) },
        { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, stopped) },
index 79f261bf86ac8e54932b7a0ef2c925fd2fd54d3c..869f3502f6312845e35d888f972a5a78a8a3b84e 100644 (file)
@@ -129,11 +129,12 @@ struct mlx5e_sw_stats {
        u64 tx_tls_encrypted_bytes;
        u64 tx_tls_ctx;
        u64 tx_tls_ooo;
+       u64 tx_tls_dump_packets;
+       u64 tx_tls_dump_bytes;
        u64 tx_tls_resync_bytes;
+       u64 tx_tls_skip_no_sync_data;
        u64 tx_tls_drop_no_sync_data;
        u64 tx_tls_drop_bypass_req;
-       u64 tx_tls_dump_packets;
-       u64 tx_tls_dump_bytes;
 #endif
 
        u64 rx_xsk_packets;
@@ -273,11 +274,12 @@ struct mlx5e_sq_stats {
        u64 tls_encrypted_bytes;
        u64 tls_ctx;
        u64 tls_ooo;
+       u64 tls_dump_packets;
+       u64 tls_dump_bytes;
        u64 tls_resync_bytes;
+       u64 tls_skip_no_sync_data;
        u64 tls_drop_no_sync_data;
        u64 tls_drop_bypass_req;
-       u64 tls_dump_packets;
-       u64 tls_dump_bytes;
 #endif
        /* less likely accessed in data path */
        u64 csum_none;
index 3e78a727f3e68634e2fb5f7e65838dce963a90bc..fda0b37075e8c444bdf2adba3a0e5dc47ca47adf 100644 (file)
@@ -1278,8 +1278,10 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
        mlx5_eswitch_del_vlan_action(esw, attr);
 
        for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++)
-               if (attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP)
+               if (attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP) {
                        mlx5e_detach_encap(priv, flow, out_index);
+                       kfree(attr->parse_attr->tun_info[out_index]);
+               }
        kvfree(attr->parse_attr);
 
        if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
@@ -1559,6 +1561,7 @@ static void mlx5e_encap_dealloc(struct mlx5e_priv *priv, struct mlx5e_encap_entr
                        mlx5_packet_reformat_dealloc(priv->mdev, e->pkt_reformat);
        }
 
+       kfree(e->tun_info);
        kfree(e->encap_header);
        kfree_rcu(e, rcu);
 }
@@ -2972,6 +2975,13 @@ mlx5e_encap_get(struct mlx5e_priv *priv, struct encap_key *key,
        return NULL;
 }
 
+static struct ip_tunnel_info *dup_tun_info(const struct ip_tunnel_info *tun_info)
+{
+       size_t tun_size = sizeof(*tun_info) + tun_info->options_len;
+
+       return kmemdup(tun_info, tun_size, GFP_KERNEL);
+}
+
 static int mlx5e_attach_encap(struct mlx5e_priv *priv,
                              struct mlx5e_tc_flow *flow,
                              struct net_device *mirred_dev,
@@ -3028,13 +3038,15 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
        refcount_set(&e->refcnt, 1);
        init_completion(&e->res_ready);
 
+       tun_info = dup_tun_info(tun_info);
+       if (!tun_info) {
+               err = -ENOMEM;
+               goto out_err_init;
+       }
        e->tun_info = tun_info;
        err = mlx5e_tc_tun_init_encap_attr(mirred_dev, priv, e, extack);
-       if (err) {
-               kfree(e);
-               e = NULL;
-               goto out_err;
-       }
+       if (err)
+               goto out_err_init;
 
        INIT_LIST_HEAD(&e->flows);
        hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key);
@@ -3075,6 +3087,12 @@ out_err:
        if (e)
                mlx5e_encap_put(priv, e);
        return err;
+
+out_err_init:
+       mutex_unlock(&esw->offloads.encap_tbl_lock);
+       kfree(tun_info);
+       kfree(e);
+       return err;
 }
 
 static int parse_tc_vlan_action(struct mlx5e_priv *priv,
@@ -3160,7 +3178,7 @@ static int add_vlan_pop_action(struct mlx5e_priv *priv,
                               struct mlx5_esw_flow_attr *attr,
                               u32 *action)
 {
-       int nest_level = vlan_get_encap_level(attr->parse_attr->filter_dev);
+       int nest_level = attr->parse_attr->filter_dev->lower_level;
        struct flow_action_entry vlan_act = {
                .id = FLOW_ACTION_VLAN_POP,
        };
@@ -3295,7 +3313,9 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
                        } else if (encap) {
                                parse_attr->mirred_ifindex[attr->out_count] =
                                        out_dev->ifindex;
-                               parse_attr->tun_info[attr->out_count] = info;
+                               parse_attr->tun_info[attr->out_count] = dup_tun_info(info);
+                               if (!parse_attr->tun_info[attr->out_count])
+                                       return -ENOMEM;
                                encap = false;
                                attr->dests[attr->out_count].flags |=
                                        MLX5_ESW_DEST_ENCAP;
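
dup_tun_info() copies the tunnel info together with its trailing option bytes in a single kmemdup(), so the driver keeps its own copy rather than pointing at caller-owned memory (and the new kfree() calls in the teardown paths release those copies). A short sketch of duplicating a header plus variable-length tail, with a hypothetical struct:

struct blob {
	unsigned int options_len;	/* number of bytes stored right after the struct */
	/* option bytes follow the struct in the same allocation */
};

static struct blob *dup_blob(const struct blob *src)
{
	/* one allocation covers the struct and its trailing options */
	return kmemdup(src, sizeof(*src) + src->options_len, GFP_KERNEL);
}
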
index d3a67a9b4eba76076ea899a82649f027e0603a92..67dc4f0921b649f7f38c750c4069d042261d1777 100644 (file)
@@ -403,7 +403,10 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
 static void mlx5e_dump_error_cqe(struct mlx5e_txqsq *sq,
                                 struct mlx5_err_cqe *err_cqe)
 {
-       u32 ci = mlx5_cqwq_get_ci(&sq->cq.wq);
+       struct mlx5_cqwq *wq = &sq->cq.wq;
+       u32 ci;
+
+       ci = mlx5_cqwq_ctr2ix(wq, wq->cc - 1);
 
        netdev_err(sq->channel->netdev,
                   "Error cqe on cqn 0x%x, ci 0x%x, sqn 0x%x, opcode 0x%x, syndrome 0x%x, vendor syndrome 0x%x\n",
@@ -479,14 +482,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
                        skb = wi->skb;
 
                        if (unlikely(!skb)) {
-#ifdef CONFIG_MLX5_EN_TLS
-                               if (wi->resync_dump_frag) {
-                                       struct mlx5e_sq_dma *dma =
-                                               mlx5e_dma_get(sq, dma_fifo_cc++);
-
-                                       mlx5e_ktls_tx_handle_resync_dump_comp(sq, wi, dma);
-                               }
-#endif
+                               mlx5e_ktls_tx_handle_resync_dump_comp(sq, wi, &dma_fifo_cc);
                                sqcc += wi->num_wqebbs;
                                continue;
                        }
@@ -542,29 +538,38 @@ void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq)
 {
        struct mlx5e_tx_wqe_info *wi;
        struct sk_buff *skb;
+       u32 dma_fifo_cc;
+       u16 sqcc;
        u16 ci;
        int i;
 
-       while (sq->cc != sq->pc) {
-               ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->cc);
+       sqcc = sq->cc;
+       dma_fifo_cc = sq->dma_fifo_cc;
+
+       while (sqcc != sq->pc) {
+               ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
                wi = &sq->db.wqe_info[ci];
                skb = wi->skb;
 
-               if (!skb) { /* nop */
-                       sq->cc++;
+               if (!skb) {
+                       mlx5e_ktls_tx_handle_resync_dump_comp(sq, wi, &dma_fifo_cc);
+                       sqcc += wi->num_wqebbs;
                        continue;
                }
 
                for (i = 0; i < wi->num_dma; i++) {
                        struct mlx5e_sq_dma *dma =
-                               mlx5e_dma_get(sq, sq->dma_fifo_cc++);
+                               mlx5e_dma_get(sq, dma_fifo_cc++);
 
                        mlx5e_tx_dma_unmap(sq->pdev, dma);
                }
 
                dev_kfree_skb_any(skb);
-               sq->cc += wi->num_wqebbs;
+               sqcc += wi->num_wqebbs;
        }
+
+       sq->dma_fifo_cc = dma_fifo_cc;
+       sq->cc = sqcc;
 }
 
 #ifdef CONFIG_MLX5_CORE_IPOIB
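
mlx5e_free_txqsq_descs() now walks the ring with local copies of the consumer counters and publishes them once at the end, matching the CQ-poll path; that is also what lets the TLS dump-completion handler advance the DMA fifo counter through a pointer. A compact sketch of the local-counter idiom, with a hypothetical ring layout:

struct ring { u16 cc, pc, size; struct slot { u8 num_slots; } *db; };

static void drain_ring(struct ring *r)
{
	u16 cc = r->cc;				/* work on a local snapshot */

	while (cc != r->pc) {
		struct slot *s = &r->db[cc & (r->size - 1)];	/* size assumed a power of two */

		/* release whatever the slot owns here */
		cc += s->num_slots;		/* a slot may span several entries */
	}
	r->cc = cc;				/* publish the new consumer index once */
}
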
index 00d71db15f2217f437b4d8438d1a8087d35ab080..369499e88fe8d1382a1db5322bdcff11a599934e 100644 (file)
@@ -285,7 +285,6 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
 
        mlx5_eswitch_set_rule_source_port(esw, spec, attr);
 
-       spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
        if (attr->outer_match_level != MLX5_MATCH_NONE)
                spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
 
index 1d55a324a17e3d00b779d8fce3ad77b914471bdb..7879e1746297c0cb533a202193775db174f22b99 100644 (file)
@@ -177,22 +177,32 @@ mlx5_eswitch_termtbl_actions_move(struct mlx5_flow_act *src,
        memset(&src->vlan[1], 0, sizeof(src->vlan[1]));
 }
 
+static bool mlx5_eswitch_offload_is_uplink_port(const struct mlx5_eswitch *esw,
+                                               const struct mlx5_flow_spec *spec)
+{
+       u32 port_mask, port_value;
+
+       if (MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source))
+               return spec->flow_context.flow_source == MLX5_VPORT_UPLINK;
+
+       port_mask = MLX5_GET(fte_match_param, spec->match_criteria,
+                            misc_parameters.source_port);
+       port_value = MLX5_GET(fte_match_param, spec->match_value,
+                             misc_parameters.source_port);
+       return (port_mask & port_value & 0xffff) == MLX5_VPORT_UPLINK;
+}
+
 bool
 mlx5_eswitch_termtbl_required(struct mlx5_eswitch *esw,
                              struct mlx5_flow_act *flow_act,
                              struct mlx5_flow_spec *spec)
 {
-       u32 port_mask = MLX5_GET(fte_match_param, spec->match_criteria,
-                                misc_parameters.source_port);
-       u32 port_value = MLX5_GET(fte_match_param, spec->match_value,
-                                 misc_parameters.source_port);
-
        if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, termination_table))
                return false;
 
        /* push vlan on RX */
        return (flow_act->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) &&
-               ((port_mask & port_value) == MLX5_VPORT_UPLINK);
+               mlx5_eswitch_offload_is_uplink_port(esw, spec);
 }
 
 struct mlx5_flow_handle *
index 4c50efe4e7f11878895a98db4305b52d21bcbb15..61021133029e6a62452bbbad84d7894ef6df00bd 100644 (file)
@@ -464,8 +464,10 @@ static int mlx5_fpga_conn_create_cq(struct mlx5_fpga_conn *conn, int cq_size)
        }
 
        err = mlx5_vector2eqn(mdev, smp_processor_id(), &eqn, &irqn);
-       if (err)
+       if (err) {
+               kvfree(in);
                goto err_cqwq;
+       }
 
        cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
        MLX5_SET(cqc, cqc, log_cq_size, ilog2(cq_size));
index 579c306caa7b10345c5a0f9ae7c9892d489f1de9..3c816e81f8d9bab405cbeeb3901d40341cbdd60d 100644 (file)
@@ -507,7 +507,8 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
                                MLX5_SET(dest_format_struct, in_dests,
                                         destination_eswitch_owner_vhca_id,
                                         dst->dest_attr.vport.vhca_id);
-                               if (extended_dest) {
+                               if (extended_dest &&
+                                   dst->dest_attr.vport.pkt_reformat) {
                                        MLX5_SET(dest_format_struct, in_dests,
                                                 packet_reformat,
                                                 !!(dst->dest_attr.vport.flags &
index d685122d9ff761794a7f89d860a90e25801f29fc..c07f3154437c6062679fd241522eb91f4afed771 100644 (file)
@@ -572,7 +572,7 @@ mlx5_fw_fatal_reporter_dump(struct devlink_health_reporter *reporter,
                return -ENOMEM;
        err = mlx5_crdump_collect(dev, cr_data);
        if (err)
-               return err;
+               goto free_data;
 
        if (priv_ctx) {
                struct mlx5_fw_reporter_ctx *fw_reporter_ctx = priv_ctx;
index 9231b39d18b21c11f971773fa65376f442d010ae..c501bf2a0252104e4460c33191605f4f9a329dff 100644 (file)
@@ -112,17 +112,11 @@ int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev,
        u32 out[MLX5_ST_SZ_DW(destroy_mkey_out)] = {0};
        u32 in[MLX5_ST_SZ_DW(destroy_mkey_in)]   = {0};
        struct xarray *mkeys = &dev->priv.mkey_table;
-       struct mlx5_core_mkey *deleted_mkey;
        unsigned long flags;
 
        xa_lock_irqsave(mkeys, flags);
-       deleted_mkey = __xa_erase(mkeys, mlx5_base_mkey(mkey->key));
+       __xa_erase(mkeys, mlx5_base_mkey(mkey->key));
        xa_unlock_irqrestore(mkeys, flags);
-       if (!deleted_mkey) {
-               mlx5_core_dbg(dev, "failed xarray delete of mkey 0x%x\n",
-                             mlx5_base_mkey(mkey->key));
-               return -ENOENT;
-       }
 
        MLX5_SET(destroy_mkey_in, in, opcode, MLX5_CMD_OP_DESTROY_MKEY);
        MLX5_SET(destroy_mkey_in, in, mkey_index, mlx5_mkey_to_idx(mkey->key));
index 913f1e5aaaf2667c8130e5352f4ae8744a5b153e..d7c7467e2d53df0e7c0d2ba98d27f61df01703b2 100644 (file)
@@ -137,7 +137,8 @@ dr_icm_pool_mr_create(struct mlx5dr_icm_pool *pool,
 
        icm_mr->icm_start_addr = icm_mr->dm.addr;
 
-       align_diff = icm_mr->icm_start_addr % align_base;
+       /* align_base is always a power of 2 */
+       align_diff = icm_mr->icm_start_addr & (align_base - 1);
        if (align_diff)
                icm_mr->used_length = align_base - align_diff;
 
index 4187f2b112b8e2e81f38d50a2daadc1ea666bd4c..e8b656075c6ff3d90a3d857fbb4bbab752183c33 100644 (file)
@@ -788,12 +788,10 @@ again:
                         * it means that all the previous stes are the same,
                         * if so, this rule is duplicated.
                         */
-                       if (mlx5dr_ste_is_last_in_rule(nic_matcher,
-                                                      matched_ste->ste_chain_location)) {
-                               mlx5dr_info(dmn, "Duplicate rule inserted, aborting!!\n");
-                               return NULL;
-                       }
-                       return matched_ste;
+                       if (!mlx5dr_ste_is_last_in_rule(nic_matcher, ste_location))
+                               return matched_ste;
+
+                       mlx5dr_dbg(dmn, "Duplicate rule inserted\n");
                }
 
                if (!skip_rehash && dr_rule_need_enlarge_hash(cur_htbl, dmn, nic_dmn)) {
index 14dcc786926ddc22ea93c6820cc117df40dd6316..4421ab22182fe7c5d254dba03e5b3ab415ea392a 100644 (file)
@@ -1186,7 +1186,7 @@ __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
        if (err)
                goto err_thermal_init;
 
-       if (mlxsw_driver->params_register && !reload)
+       if (mlxsw_driver->params_register)
                devlink_params_publish(devlink);
 
        return 0;
@@ -1259,7 +1259,7 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
                        return;
        }
 
-       if (mlxsw_core->driver->params_unregister && !reload)
+       if (mlxsw_core->driver->params_unregister)
                devlink_params_unpublish(devlink);
        mlxsw_thermal_fini(mlxsw_core->thermal);
        mlxsw_hwmon_fini(mlxsw_core->hwmon);
index 899450b28621e59bc4f5c1c1c0aa8f972852788b..7c03b661ae7ea218fe562afc14e7448d76c99104 100644 (file)
@@ -99,6 +99,7 @@ static void mlxsw_sp_rx_drop_listener(struct sk_buff *skb, u8 local_port,
        devlink = priv_to_devlink(mlxsw_sp->core);
        in_devlink_port = mlxsw_core_port_devlink_port_get(mlxsw_sp->core,
                                                           local_port);
+       skb_push(skb, ETH_HLEN);
        devlink_trap_report(devlink, skb, trap_ctx, in_devlink_port);
        consume_skb(skb);
 }
index 4d1bce4389c792a163b87938e4b8142a83808c2e..344539c0d3aac90f27c5ea9672b2a6878b0ab320 100644 (file)
@@ -261,8 +261,15 @@ static int ocelot_vlan_vid_add(struct net_device *dev, u16 vid, bool pvid,
                port->pvid = vid;
 
        /* Untagged egress vlan clasification */
-       if (untagged)
+       if (untagged && port->vid != vid) {
+               if (port->vid) {
+                       dev_err(ocelot->dev,
+                               "Port already has a native VLAN: %d\n",
+                               port->vid);
+                       return -EBUSY;
+               }
                port->vid = vid;
+       }
 
        ocelot_vlan_port_apply(ocelot, port);
 
@@ -934,7 +941,7 @@ end:
 static int ocelot_vlan_rx_add_vid(struct net_device *dev, __be16 proto,
                                  u16 vid)
 {
-       return ocelot_vlan_vid_add(dev, vid, false, true);
+       return ocelot_vlan_vid_add(dev, vid, false, false);
 }
 
 static int ocelot_vlan_rx_kill_vid(struct net_device *dev, __be16 proto,
index b063eb78fa0cfc1c90c5a288169b33766fd1c8a7..aac115136720f2611c12df013748613b78f0139e 100644 (file)
@@ -388,13 +388,14 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
                        continue;
 
                phy = of_phy_find_device(phy_node);
+               of_node_put(phy_node);
                if (!phy)
                        continue;
 
                err = ocelot_probe_port(ocelot, port, regs, phy);
                if (err) {
                        of_node_put(portnp);
-                       return err;
+                       goto out_put_ports;
                }
 
                phy_mode = of_get_phy_mode(portnp);
@@ -422,7 +423,8 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
                                "invalid phy mode for port%d, (Q)SGMII only\n",
                                port);
                        of_node_put(portnp);
-                       return -EINVAL;
+                       err = -EINVAL;
+                       goto out_put_ports;
                }
 
                serdes = devm_of_phy_get(ocelot->dev, portnp, NULL);
@@ -435,7 +437,8 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
                                        "missing SerDes phys for port%d\n",
                                        port);
 
-                       goto err_probe_ports;
+                       of_node_put(portnp);
+                       goto out_put_ports;
                }
 
                ocelot->ports[port]->serdes = serdes;
@@ -447,9 +450,8 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
 
        dev_info(&pdev->dev, "Ocelot switch probed\n");
 
-       return 0;
-
-err_probe_ports:
+out_put_ports:
+       of_node_put(ports);
        return err;
 }
 
index 1eef446036d6a2be6bd4acb306c782f7b169f2f1..79d72c88bbef2a16a584bef5359e958fc001344d 100644 (file)
@@ -299,22 +299,6 @@ static void nfp_repr_clean(struct nfp_repr *repr)
        nfp_port_free(repr->port);
 }
 
-static struct lock_class_key nfp_repr_netdev_xmit_lock_key;
-static struct lock_class_key nfp_repr_netdev_addr_lock_key;
-
-static void nfp_repr_set_lockdep_class_one(struct net_device *dev,
-                                          struct netdev_queue *txq,
-                                          void *_unused)
-{
-       lockdep_set_class(&txq->_xmit_lock, &nfp_repr_netdev_xmit_lock_key);
-}
-
-static void nfp_repr_set_lockdep_class(struct net_device *dev)
-{
-       lockdep_set_class(&dev->addr_list_lock, &nfp_repr_netdev_addr_lock_key);
-       netdev_for_each_tx_queue(dev, nfp_repr_set_lockdep_class_one, NULL);
-}
-
 int nfp_repr_init(struct nfp_app *app, struct net_device *netdev,
                  u32 cmsg_port_id, struct nfp_port *port,
                  struct net_device *pf_netdev)
@@ -324,8 +308,6 @@ int nfp_repr_init(struct nfp_app *app, struct net_device *netdev,
        u32 repr_cap = nn->tlv_caps.repr_cap;
        int err;
 
-       nfp_repr_set_lockdep_class(netdev);
-
        repr->port = port;
        repr->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX, GFP_KERNEL);
        if (!repr->dst)
index 141571e2ec11152797a9267c77a5f13e3b7887d3..544012a67221ac04a915441146cc2baf2b8c55a5 100644 (file)
@@ -1356,9 +1356,6 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
        if (!is_valid_ether_addr(ndev->dev_addr))
                eth_hw_addr_random(ndev);
 
-       /* Reset the ethernet controller */
-       __lpc_eth_reset(pldat);
-
        /* then shut everything down to save power */
        __lpc_eth_shutdown(pldat);
 
index bd0583e409dfec7d216b2145ac5990ebdff88919..d25b88f53de4c501d6dc80c6634efd6bbd73a344 100644 (file)
@@ -20,6 +20,7 @@ if NET_VENDOR_PENSANDO
 config IONIC
        tristate "Pensando Ethernet IONIC Support"
        depends on 64BIT && PCI
+       select NET_DEVLINK
        help
          This enables the support for the Pensando family of Ethernet
          adapters.  More specific information on this driver can be
index 72107a0627a9957de0ce4c01e6b561f37599b510..20faa8d24c9ffea255d25bf5103b16f94613d301 100644 (file)
@@ -1,6 +1,8 @@
 // SPDX-License-Identifier: GPL-2.0
 /* Copyright(c) 2017 - 2019 Pensando Systems, Inc */
 
+#include <linux/printk.h>
+#include <linux/dynamic_debug.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/rtnetlink.h>
index 812190e729c2d95f0cd9c54ecc319eac0308ebb9..6a95b42a8d8c4037ad4b85d8715e13946b2db6a3 100644 (file)
@@ -182,6 +182,8 @@ struct ionic_lif {
 
 #define lif_to_txqcq(lif, i)   ((lif)->txqcqs[i].qcq)
 #define lif_to_rxqcq(lif, i)   ((lif)->rxqcqs[i].qcq)
+#define lif_to_txstats(lif, i) ((lif)->txqcqs[i].stats->tx)
+#define lif_to_rxstats(lif, i) ((lif)->rxqcqs[i].stats->rx)
 #define lif_to_txq(lif, i)     (&lif_to_txqcq((lif), i)->q)
 #define lif_to_rxq(lif, i)     (&lif_to_txqcq((lif), i)->q)
 
index 15e432386b35a2ad0733c0df28e4002588bb66ea..aab3114134128f578fc2eb4acf849bcabe282df2 100644 (file)
@@ -1,6 +1,8 @@
 // SPDX-License-Identifier: GPL-2.0
 /* Copyright(c) 2017 - 2019 Pensando Systems, Inc */
 
+#include <linux/printk.h>
+#include <linux/dynamic_debug.h>
 #include <linux/module.h>
 #include <linux/netdevice.h>
 #include <linux/utsname.h>
index e2907884f843fd7ae7e982cfbdf595dc5c4b4856..03916b6d47f2236308415b925f7e3d7f280562b4 100644 (file)
@@ -117,7 +117,8 @@ static u64 ionic_sw_stats_get_count(struct ionic_lif *lif)
        /* rx stats */
        total += MAX_Q(lif) * IONIC_NUM_RX_STATS;
 
-       if (test_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state)) {
+       if (test_bit(IONIC_LIF_UP, lif->state) &&
+           test_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state)) {
                /* tx debug stats */
                total += MAX_Q(lif) * (IONIC_NUM_DBG_CQ_STATS +
                                      IONIC_NUM_TX_Q_STATS +
@@ -149,7 +150,8 @@ static void ionic_sw_stats_get_strings(struct ionic_lif *lif, u8 **buf)
                        *buf += ETH_GSTRING_LEN;
                }
 
-               if (test_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state)) {
+               if (test_bit(IONIC_LIF_UP, lif->state) &&
+                   test_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state)) {
                        for (i = 0; i < IONIC_NUM_TX_Q_STATS; i++) {
                                snprintf(*buf, ETH_GSTRING_LEN,
                                         "txq_%d_%s",
@@ -187,7 +189,8 @@ static void ionic_sw_stats_get_strings(struct ionic_lif *lif, u8 **buf)
                        *buf += ETH_GSTRING_LEN;
                }
 
-               if (test_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state)) {
+               if (test_bit(IONIC_LIF_UP, lif->state) &&
+                   test_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state)) {
                        for (i = 0; i < IONIC_NUM_DBG_CQ_STATS; i++) {
                                snprintf(*buf, ETH_GSTRING_LEN,
                                         "rxq_%d_cq_%s",
@@ -223,6 +226,8 @@ static void ionic_sw_stats_get_values(struct ionic_lif *lif, u64 **buf)
 {
        struct ionic_lif_sw_stats lif_stats;
        struct ionic_qcq *txqcq, *rxqcq;
+       struct ionic_tx_stats *txstats;
+       struct ionic_rx_stats *rxstats;
        int i, q_num;
 
        ionic_get_lif_stats(lif, &lif_stats);
@@ -233,15 +238,17 @@ static void ionic_sw_stats_get_values(struct ionic_lif *lif, u64 **buf)
        }
 
        for (q_num = 0; q_num < MAX_Q(lif); q_num++) {
-               txqcq = lif_to_txqcq(lif, q_num);
+               txstats = &lif_to_txstats(lif, q_num);
 
                for (i = 0; i < IONIC_NUM_TX_STATS; i++) {
-                       **buf = IONIC_READ_STAT64(&txqcq->stats->tx,
+                       **buf = IONIC_READ_STAT64(txstats,
                                                  &ionic_tx_stats_desc[i]);
                        (*buf)++;
                }
 
-               if (test_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state)) {
+               if (test_bit(IONIC_LIF_UP, lif->state) &&
+                   test_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state)) {
+                       txqcq = lif_to_txqcq(lif, q_num);
                        for (i = 0; i < IONIC_NUM_TX_Q_STATS; i++) {
                                **buf = IONIC_READ_STAT64(&txqcq->q,
                                                      &ionic_txq_stats_desc[i]);
@@ -258,22 +265,24 @@ static void ionic_sw_stats_get_values(struct ionic_lif *lif, u64 **buf)
                                (*buf)++;
                        }
                        for (i = 0; i < IONIC_MAX_NUM_SG_CNTR; i++) {
-                               **buf = txqcq->stats->tx.sg_cntr[i];
+                               **buf = txstats->sg_cntr[i];
                                (*buf)++;
                        }
                }
        }
 
        for (q_num = 0; q_num < MAX_Q(lif); q_num++) {
-               rxqcq = lif_to_rxqcq(lif, q_num);
+               rxstats = &lif_to_rxstats(lif, q_num);
 
                for (i = 0; i < IONIC_NUM_RX_STATS; i++) {
-                       **buf = IONIC_READ_STAT64(&rxqcq->stats->rx,
+                       **buf = IONIC_READ_STAT64(rxstats,
                                                  &ionic_rx_stats_desc[i]);
                        (*buf)++;
                }
 
-               if (test_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state)) {
+               if (test_bit(IONIC_LIF_UP, lif->state) &&
+                   test_bit(IONIC_LIF_SW_DEBUG_STATS, lif->state)) {
+                       rxqcq = lif_to_rxqcq(lif, q_num);
                        for (i = 0; i < IONIC_NUM_DBG_CQ_STATS; i++) {
                                **buf = IONIC_READ_STAT64(&rxqcq->cq,
                                                   &ionic_dbg_cq_stats_desc[i]);
index 2ce70097d018b8e7e846e71d4a9b79204838c0cf..38f7f40b3a4d0dd5773a24185e6602d7479fc8b7 100644 (file)
 #define QED_ROCE_QPS                   (8192)
 #define QED_ROCE_DPIS                  (8)
 #define QED_RDMA_SRQS                   QED_ROCE_QPS
-#define QED_NVM_CFG_SET_FLAGS          0xE
-#define QED_NVM_CFG_SET_PF_FLAGS       0x1E
 #define QED_NVM_CFG_GET_FLAGS          0xA
 #define QED_NVM_CFG_GET_PF_FLAGS       0x1A
+#define QED_NVM_CFG_MAX_ATTRS          50
 
 static char version[] =
        "QLogic FastLinQ 4xxxx Core Module qed " DRV_MODULE_VERSION "\n";
@@ -2255,6 +2254,7 @@ static int qed_nvm_flash_cfg_write(struct qed_dev *cdev, const u8 **data)
 {
        struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
        u8 entity_id, len, buf[32];
+       bool need_nvm_init = true;
        struct qed_ptt *ptt;
        u16 cfg_id, count;
        int rc = 0, i;
@@ -2271,8 +2271,10 @@ static int qed_nvm_flash_cfg_write(struct qed_dev *cdev, const u8 **data)
 
        DP_VERBOSE(cdev, NETIF_MSG_DRV,
                   "Read config ids: num_attrs = %0d\n", count);
-       /* NVM CFG ID attributes */
-       for (i = 0; i < count; i++) {
+       /* NVM CFG ID attributes. Start loop index from 1 to avoid additional
+        * arithmetic operations in the implementation.
+        */
+       for (i = 1; i <= count; i++) {
                cfg_id = *((u16 *)*data);
                *data += 2;
                entity_id = **data;
@@ -2282,8 +2284,21 @@ static int qed_nvm_flash_cfg_write(struct qed_dev *cdev, const u8 **data)
                memcpy(buf, *data, len);
                *data += len;
 
-               flags = entity_id ? QED_NVM_CFG_SET_PF_FLAGS :
-                       QED_NVM_CFG_SET_FLAGS;
+               flags = 0;
+               if (need_nvm_init) {
+                       flags |= QED_NVM_CFG_OPTION_INIT;
+                       need_nvm_init = false;
+               }
+
+               /* Commit to flash and free the resources */
+               if (!(i % QED_NVM_CFG_MAX_ATTRS) || i == count) {
+                       flags |= QED_NVM_CFG_OPTION_COMMIT |
+                                QED_NVM_CFG_OPTION_FREE;
+                       need_nvm_init = true;
+               }
+
+               if (entity_id)
+                       flags |= QED_NVM_CFG_OPTION_ENTITY_SEL;
 
                DP_VERBOSE(cdev, NETIF_MSG_DRV,
                           "cfg_id = %d entity = %d len = %d\n", cfg_id,
index 78f77b712b1012509528e3a9f2889d6ea057b596..dcb5c917f3733fd60642bbff0dc45104bd91902c 100644 (file)
@@ -2005,7 +2005,7 @@ static void qed_iov_vf_mbx_stop_vport(struct qed_hwfn *p_hwfn,
            (qed_iov_validate_active_txq(p_hwfn, vf))) {
                vf->b_malicious = true;
                DP_NOTICE(p_hwfn,
-                         "VF [%02x] - considered malicious; Unable to stop RX/TX queuess\n",
+                         "VF [%02x] - considered malicious; Unable to stop RX/TX queues\n",
                          vf->abs_vf_id);
                status = PFVF_STATUS_MALICIOUS;
                goto out;
index 457444894d807bc7264d6e54c2bb161ad6a3abe2..b4b8ba00ee0151083ca33a14ebf860c7fb7e103b 100644 (file)
@@ -2787,6 +2787,7 @@ static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
                                netdev_err(qdev->ndev,
                                           "PCI mapping failed with error: %d\n",
                                           err);
+                               dev_kfree_skb_irq(skb);
                                ql_free_large_buffers(qdev);
                                return -ENOMEM;
                        }
index 74f81fe03810c419279a7b200190fdbbd3c99ed6..5064c292b873d294481e678782bc4949c709e3c6 100644 (file)
@@ -1029,6 +1029,10 @@ static int r8168dp_2_mdio_read(struct rtl8169_private *tp, int reg)
 {
        int value;
 
+       /* Work around issue with chip reporting wrong PHY ID */
+       if (reg == MII_PHYSID2)
+               return 0xc912;
+
        r8168dp_2_mdio_start(tp);
 
        value = r8169_mdio_read(tp, reg);
@@ -4146,6 +4150,14 @@ static void rtl_hw_jumbo_disable(struct rtl8169_private *tp)
        rtl_lock_config_regs(tp);
 }
 
+static void rtl_jumbo_config(struct rtl8169_private *tp, int mtu)
+{
+       if (mtu > ETH_DATA_LEN)
+               rtl_hw_jumbo_enable(tp);
+       else
+               rtl_hw_jumbo_disable(tp);
+}
+
 DECLARE_RTL_COND(rtl_chipcmd_cond)
 {
        return RTL_R8(tp, ChipCmd) & CmdReset;
@@ -4442,11 +4454,6 @@ static void rtl8168g_set_pause_thresholds(struct rtl8169_private *tp,
 static void rtl_hw_start_8168bb(struct rtl8169_private *tp)
 {
        RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);
-
-       if (tp->dev->mtu <= ETH_DATA_LEN) {
-               rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B |
-                                        PCI_EXP_DEVCTL_NOSNOOP_EN);
-       }
 }
 
 static void rtl_hw_start_8168bef(struct rtl8169_private *tp)
@@ -4462,9 +4469,6 @@ static void __rtl_hw_start_8168cp(struct rtl8169_private *tp)
 
        RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);
 
-       if (tp->dev->mtu <= ETH_DATA_LEN)
-               rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
-
        rtl_disable_clock_request(tp);
 }
 
@@ -4490,9 +4494,6 @@ static void rtl_hw_start_8168cp_2(struct rtl8169_private *tp)
        rtl_set_def_aspm_entry_latency(tp);
 
        RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);
-
-       if (tp->dev->mtu <= ETH_DATA_LEN)
-               rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
 }
 
 static void rtl_hw_start_8168cp_3(struct rtl8169_private *tp)
@@ -4503,9 +4504,6 @@ static void rtl_hw_start_8168cp_3(struct rtl8169_private *tp)
 
        /* Magic. */
        RTL_W8(tp, DBG_REG, 0x20);
-
-       if (tp->dev->mtu <= ETH_DATA_LEN)
-               rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
 }
 
 static void rtl_hw_start_8168c_1(struct rtl8169_private *tp)
@@ -4611,9 +4609,6 @@ static void rtl_hw_start_8168e_1(struct rtl8169_private *tp)
 
        rtl_ephy_init(tp, e_info_8168e_1);
 
-       if (tp->dev->mtu <= ETH_DATA_LEN)
-               rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
-
        rtl_disable_clock_request(tp);
 
        /* Reset tx FIFO pointer */
@@ -4636,9 +4631,6 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
 
        rtl_ephy_init(tp, e_info_8168e_2);
 
-       if (tp->dev->mtu <= ETH_DATA_LEN)
-               rtl_tx_performance_tweak(tp, PCI_EXP_DEVCTL_READRQ_4096B);
-
        rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000);
        rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000);
        rtl_set_fifo_size(tp, 0x10, 0x10, 0x02, 0x06);
@@ -5485,6 +5477,8 @@ static void rtl_hw_start(struct  rtl8169_private *tp)
        rtl_set_rx_tx_desc_registers(tp);
        rtl_lock_config_regs(tp);
 
+       rtl_jumbo_config(tp, tp->dev->mtu);
+
        /* Initially a 10 us delay. Turned it into a PCI commit. - FR */
        RTL_R16(tp, CPlusCmd);
        RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb);
@@ -5498,10 +5492,7 @@ static int rtl8169_change_mtu(struct net_device *dev, int new_mtu)
 {
        struct rtl8169_private *tp = netdev_priv(dev);
 
-       if (new_mtu > ETH_DATA_LEN)
-               rtl_hw_jumbo_enable(tp);
-       else
-               rtl_hw_jumbo_disable(tp);
+       rtl_jumbo_config(tp, new_mtu);
 
        dev->mtu = new_mtu;
        netdev_update_features(dev);
index 55db7fbd43cc3cfd112c31803e1318bda1555e05..f9e6744d8fd6ea0527a86271044d400788158a27 100644 (file)
@@ -282,7 +282,6 @@ struct netsec_desc_ring {
        void *vaddr;
        u16 head, tail;
        u16 xdp_xmit; /* netsec_xdp_xmit packets */
-       bool is_xdp;
        struct page_pool *page_pool;
        struct xdp_rxq_info xdp_rxq;
        spinlock_t lock; /* XDP tx queue locking */
@@ -634,8 +633,7 @@ static bool netsec_clean_tx_dring(struct netsec_priv *priv)
        unsigned int bytes;
        int cnt = 0;
 
-       if (dring->is_xdp)
-               spin_lock(&dring->lock);
+       spin_lock(&dring->lock);
 
        bytes = 0;
        entry = dring->vaddr + DESC_SZ * tail;
@@ -682,8 +680,8 @@ next:
                entry = dring->vaddr + DESC_SZ * tail;
                cnt++;
        }
-       if (dring->is_xdp)
-               spin_unlock(&dring->lock);
+
+       spin_unlock(&dring->lock);
 
        if (!cnt)
                return false;
@@ -799,9 +797,6 @@ static void netsec_set_tx_de(struct netsec_priv *priv,
        de->data_buf_addr_lw = lower_32_bits(desc->dma_addr);
        de->buf_len_info = (tx_ctrl->tcp_seg_len << 16) | desc->len;
        de->attr = attr;
-       /* under spin_lock if using XDP */
-       if (!dring->is_xdp)
-               dma_wmb();
 
        dring->desc[idx] = *desc;
        if (desc->buf_type == TYPE_NETSEC_SKB)
@@ -1123,12 +1118,10 @@ static netdev_tx_t netsec_netdev_start_xmit(struct sk_buff *skb,
        u16 tso_seg_len = 0;
        int filled;
 
-       if (dring->is_xdp)
-               spin_lock_bh(&dring->lock);
+       spin_lock_bh(&dring->lock);
        filled = netsec_desc_used(dring);
        if (netsec_check_stop_tx(priv, filled)) {
-               if (dring->is_xdp)
-                       spin_unlock_bh(&dring->lock);
+               spin_unlock_bh(&dring->lock);
                net_warn_ratelimited("%s %s Tx queue full\n",
                                     dev_name(priv->dev), ndev->name);
                return NETDEV_TX_BUSY;
@@ -1161,8 +1154,7 @@ static netdev_tx_t netsec_netdev_start_xmit(struct sk_buff *skb,
        tx_desc.dma_addr = dma_map_single(priv->dev, skb->data,
                                          skb_headlen(skb), DMA_TO_DEVICE);
        if (dma_mapping_error(priv->dev, tx_desc.dma_addr)) {
-               if (dring->is_xdp)
-                       spin_unlock_bh(&dring->lock);
+               spin_unlock_bh(&dring->lock);
                netif_err(priv, drv, priv->ndev,
                          "%s: DMA mapping failed\n", __func__);
                ndev->stats.tx_dropped++;
@@ -1177,8 +1169,7 @@ static netdev_tx_t netsec_netdev_start_xmit(struct sk_buff *skb,
        netdev_sent_queue(priv->ndev, skb->len);
 
        netsec_set_tx_de(priv, dring, &tx_ctrl, &tx_desc, skb);
-       if (dring->is_xdp)
-               spin_unlock_bh(&dring->lock);
+       spin_unlock_bh(&dring->lock);
        netsec_write(priv, NETSEC_REG_NRM_TX_PKTCNT, 1); /* submit another tx */
 
        return NETDEV_TX_OK;
@@ -1262,7 +1253,6 @@ err:
 static void netsec_setup_tx_dring(struct netsec_priv *priv)
 {
        struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX];
-       struct bpf_prog *xdp_prog = READ_ONCE(priv->xdp_prog);
        int i;
 
        for (i = 0; i < DESC_NUM; i++) {
@@ -1275,12 +1265,6 @@ static void netsec_setup_tx_dring(struct netsec_priv *priv)
                 */
                de->attr = 1U << NETSEC_TX_SHIFT_OWN_FIELD;
        }
-
-       if (xdp_prog)
-               dring->is_xdp = true;
-       else
-               dring->is_xdp = false;
-
 }
 
 static int netsec_setup_rx_dring(struct netsec_priv *priv)
index f97a4096f8fc8e44ef2456e822952a885e48de83..ddcc191febdb22db310fe9833bb39facfbc1de74 100644 (file)
@@ -651,7 +651,8 @@ static void sun8i_dwmac_set_filter(struct mac_device_info *hw,
                        }
                }
        } else {
-               netdev_info(dev, "Too many address, switching to promiscuous\n");
+               if (!(readl(ioaddr + EMAC_RX_FRM_FLT) & EMAC_FRM_FLT_RXALL))
+                       netdev_info(dev, "Too many address, switching to promiscuous\n");
                v = EMAC_FRM_FLT_RXALL;
        }
 
index 9b4b5f69fc0211995591f772f9e8f1bf4dea2ae8..5a7b0aca1d31af097ece2070ad0f657be3ea49bf 100644 (file)
@@ -401,8 +401,11 @@ static void dwmac4_set_filter(struct mac_device_info *hw,
        int numhashregs = (hw->multicast_filter_bins >> 5);
        int mcbitslog2 = hw->mcast_bits_log2;
        unsigned int value;
+       u32 mc_filter[8];
        int i;
 
+       memset(mc_filter, 0, sizeof(mc_filter));
+
        value = readl(ioaddr + GMAC_PACKET_FILTER);
        value &= ~GMAC_PACKET_FILTER_HMC;
        value &= ~GMAC_PACKET_FILTER_HPF;
@@ -416,16 +419,13 @@ static void dwmac4_set_filter(struct mac_device_info *hw,
                /* Pass all multi */
                value |= GMAC_PACKET_FILTER_PM;
                /* Set all the bits of the HASH tab */
-               for (i = 0; i < numhashregs; i++)
-                       writel(0xffffffff, ioaddr + GMAC_HASH_TAB(i));
+               memset(mc_filter, 0xff, sizeof(mc_filter));
        } else if (!netdev_mc_empty(dev)) {
                struct netdev_hw_addr *ha;
-               u32 mc_filter[8];
 
                /* Hash filter for multicast */
                value |= GMAC_PACKET_FILTER_HMC;
 
-               memset(mc_filter, 0, sizeof(mc_filter));
                netdev_for_each_mc_addr(ha, dev) {
                        /* The upper n bits of the calculated CRC are used to
                         * index the contents of the hash table. The number of
@@ -440,14 +440,15 @@ static void dwmac4_set_filter(struct mac_device_info *hw,
                         */
                        mc_filter[bit_nr >> 5] |= (1 << (bit_nr & 0x1f));
                }
-               for (i = 0; i < numhashregs; i++)
-                       writel(mc_filter[i], ioaddr + GMAC_HASH_TAB(i));
        }
 
+       for (i = 0; i < numhashregs; i++)
+               writel(mc_filter[i], ioaddr + GMAC_HASH_TAB(i));
+
        value |= GMAC_PACKET_FILTER_HPF;
 
        /* Handle multiple unicast addresses */
-       if (netdev_uc_count(dev) > GMAC_MAX_PERFECT_ADDRESSES) {
+       if (netdev_uc_count(dev) > hw->unicast_filter_entries) {
                /* Switch to promiscuous mode if more than 128 addrs
                 * are required
                 */
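
The multicast hash above spreads 256 bins across eight 32-bit registers: the upper CRC bits select the bin, bit_nr >> 5 picks the register and bit_nr & 0x1f picks the bit inside it, and the registers are now written unconditionally so stale bins are cleared when the address list shrinks. A tiny standalone illustration with a hypothetical bin number:

	u32 mc_filter[8] = { 0 };		/* 8 x 32 bits = 256 hash bins */
	unsigned int bit_nr = 77;		/* example CRC-derived bin */

	mc_filter[bit_nr >> 5] |= 1U << (bit_nr & 0x1f);	/* word 2, bit 13 */
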
index 3f4f3132e16b3b019869de9c6a219eb677e628a4..e436fa160c7d6a473f46f68c0facc1a31b0dc69a 100644 (file)
@@ -515,6 +515,7 @@ int dwmac5_flex_pps_config(void __iomem *ioaddr, int index,
 
        if (!enable) {
                val |= PPSCMDx(index, 0x5);
+               val |= PPSEN0;
                writel(val, ioaddr + MAC_PPS_CONTROL);
                return 0;
        }
index 5923ca62d7938504c3a200df92459a6f6da0402e..99037386080a65055b6bcb1cbbddb45643a471aa 100644 (file)
@@ -84,7 +84,7 @@
 #define XGMAC_TSIE                     BIT(12)
 #define XGMAC_LPIIE                    BIT(5)
 #define XGMAC_PMTIE                    BIT(4)
-#define XGMAC_INT_DEFAULT_EN           (XGMAC_LPIIE | XGMAC_PMTIE | XGMAC_TSIE)
+#define XGMAC_INT_DEFAULT_EN           (XGMAC_LPIIE | XGMAC_PMTIE)
 #define XGMAC_Qx_TX_FLOW_CTRL(x)       (0x00000070 + (x) * 4)
 #define XGMAC_PT                       GENMASK(31, 16)
 #define XGMAC_PT_SHIFT                 16
 #define XGMAC_HWFEAT_GMIISEL           BIT(1)
 #define XGMAC_HW_FEATURE1              0x00000120
 #define XGMAC_HWFEAT_L3L4FNUM          GENMASK(30, 27)
+#define XGMAC_HWFEAT_HASHTBLSZ         GENMASK(25, 24)
 #define XGMAC_HWFEAT_RSSEN             BIT(20)
 #define XGMAC_HWFEAT_TSOEN             BIT(18)
 #define XGMAC_HWFEAT_SPHEN             BIT(17)
index 2b277b2c586bbd1486e701efa72dfa91a2f2d4d3..5031398e612c241b12b99fc64c1205722ea6ea42 100644 (file)
@@ -472,7 +472,7 @@ static void dwxgmac2_set_filter(struct mac_device_info *hw,
        dwxgmac2_set_mchash(ioaddr, mc_filter, mcbitslog2);
 
        /* Handle multiple unicast addresses */
-       if (netdev_uc_count(dev) > XGMAC_ADDR_MAX) {
+       if (netdev_uc_count(dev) > hw->unicast_filter_entries) {
                value |= XGMAC_FILTER_PR;
        } else {
                struct netdev_hw_addr *ha;
@@ -523,8 +523,8 @@ static int dwxgmac2_rss_configure(struct mac_device_info *hw,
                                  struct stmmac_rss *cfg, u32 num_rxq)
 {
        void __iomem *ioaddr = hw->pcsr;
+       u32 value, *key;
        int i, ret;
-       u32 value;
 
        value = readl(ioaddr + XGMAC_RSS_CTRL);
        if (!cfg || !cfg->enable) {
@@ -533,8 +533,9 @@ static int dwxgmac2_rss_configure(struct mac_device_info *hw,
                return 0;
        }
 
-       for (i = 0; i < (sizeof(cfg->key) / sizeof(u32)); i++) {
-               ret = dwxgmac2_rss_write_reg(ioaddr, true, i, cfg->key[i]);
+       key = (u32 *)cfg->key;
+       for (i = 0; i < (ARRAY_SIZE(cfg->key) / sizeof(u32)); i++) {
+               ret = dwxgmac2_rss_write_reg(ioaddr, true, i, key[i]);
                if (ret)
                        return ret;
        }
index 53c4a40d8386b5bd974c0bd8e411d676542c3db1..965cbe3e6f51f262344e4bd9f62d22d75718d295 100644 (file)
@@ -380,6 +380,7 @@ static void dwxgmac2_get_hw_feature(void __iomem *ioaddr,
        /* MAC HW feature 1 */
        hw_cap = readl(ioaddr + XGMAC_HW_FEATURE1);
        dma_cap->l3l4fnum = (hw_cap & XGMAC_HWFEAT_L3L4FNUM) >> 27;
+       dma_cap->hash_tb_sz = (hw_cap & XGMAC_HWFEAT_HASHTBLSZ) >> 24;
        dma_cap->rssen = (hw_cap & XGMAC_HWFEAT_RSSEN) >> 20;
        dma_cap->tsoen = (hw_cap & XGMAC_HWFEAT_TSOEN) >> 18;
        dma_cap->sphen = (hw_cap & XGMAC_HWFEAT_SPHEN) >> 17;
index d3232738fb257d960713738f23273886cdeeb645..4e9c848c67ccc93477839c34959cb6f3760e7e35 100644 (file)
@@ -629,6 +629,7 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
                        config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
                        ptp_v2 = PTP_TCR_TSVER2ENA;
                        snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
+                       ts_event_en = PTP_TCR_TSEVNTENA;
                        ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
                        ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
                        ptp_over_ethernet = PTP_TCR_TSIPENA;
@@ -2609,7 +2610,7 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
        }
 
        if (priv->hw->pcs)
-               stmmac_pcs_ctrl_ane(priv, priv->hw, 1, priv->hw->ps, 0);
+               stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
 
        /* set TX and RX rings length */
        stmmac_set_rings_length(priv);
@@ -2994,6 +2995,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
        } else {
                stmmac_set_desc_addr(priv, first, des);
                tmp_pay_len = pay_len;
+               des += proto_hdr_len;
        }
 
        stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
@@ -4715,11 +4717,9 @@ int stmmac_suspend(struct device *dev)
        if (!ndev || !netif_running(ndev))
                return 0;
 
-       mutex_lock(&priv->lock);
+       phylink_mac_change(priv->phylink, false);
 
-       rtnl_lock();
-       phylink_stop(priv->phylink);
-       rtnl_unlock();
+       mutex_lock(&priv->lock);
 
        netif_device_detach(ndev);
        stmmac_stop_all_queues(priv);
@@ -4734,11 +4734,19 @@ int stmmac_suspend(struct device *dev)
                stmmac_pmt(priv, priv->hw, priv->wolopts);
                priv->irq_wake = 1;
        } else {
+               mutex_unlock(&priv->lock);
+               rtnl_lock();
+               phylink_stop(priv->phylink);
+               rtnl_unlock();
+               mutex_lock(&priv->lock);
+
                stmmac_mac_set(priv, priv->ioaddr, false);
                pinctrl_pm_select_sleep_state(priv->device);
                /* Disable clock in case of PWM is off */
-               clk_disable(priv->plat->pclk);
-               clk_disable(priv->plat->stmmac_clk);
+               if (priv->plat->clk_ptp_ref)
+                       clk_disable_unprepare(priv->plat->clk_ptp_ref);
+               clk_disable_unprepare(priv->plat->pclk);
+               clk_disable_unprepare(priv->plat->stmmac_clk);
        }
        mutex_unlock(&priv->lock);
 
@@ -4801,8 +4809,10 @@ int stmmac_resume(struct device *dev)
        } else {
                pinctrl_pm_select_default_state(priv->device);
                /* enable the clk previously disabled */
-               clk_enable(priv->plat->stmmac_clk);
-               clk_enable(priv->plat->pclk);
+               clk_prepare_enable(priv->plat->stmmac_clk);
+               clk_prepare_enable(priv->plat->pclk);
+               if (priv->plat->clk_ptp_ref)
+                       clk_prepare_enable(priv->plat->clk_ptp_ref);
                /* reset the phy so that it's ready */
                if (priv->mii)
                        stmmac_mdio_reset(priv->mii);
@@ -4824,12 +4834,16 @@ int stmmac_resume(struct device *dev)
 
        stmmac_start_all_queues(priv);
 
-       rtnl_lock();
-       phylink_start(priv->phylink);
-       rtnl_unlock();
-
        mutex_unlock(&priv->lock);
 
+       if (!device_may_wakeup(priv->device)) {
+               rtnl_lock();
+               phylink_start(priv->phylink);
+               rtnl_unlock();
+       }
+
+       phylink_mac_change(priv->phylink, true);
+
        return 0;
 }
 EXPORT_SYMBOL_GPL(stmmac_resume);
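
The suspend path now uses clk_disable_unprepare() and the resume path clk_prepare_enable(), keeping both the enable and the prepare counts balanced across a sleep cycle (and covering the optional PTP reference clock). A minimal sketch of the balanced pairing, with a hypothetical clk handle:

	/* suspend */
	clk_disable_unprepare(my_clk);		/* drops both the enable and prepare counts */

	/* resume: undo both in one call, checking for failure */
	ret = clk_prepare_enable(my_clk);
	if (ret)
		return ret;
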
index 173493db038c0a9f5199bf1675429e0b379a6b4f..df638b18b72c7f895c6cb65b13b5119902fc30b8 100644 (file)
@@ -164,7 +164,7 @@ static int stmmac_enable(struct ptp_clock_info *ptp,
 /* structure describing a PTP hardware clock */
 static struct ptp_clock_info stmmac_ptp_clock_ops = {
        .owner = THIS_MODULE,
-       .name = "stmmac_ptp_clock",
+       .name = "stmmac ptp",
        .max_adj = 62500000,
        .n_alarm = 0,
        .n_ext_ts = 0,
index 5f66f6161629a3d0e4bb89e42f011f4db4f39931..e4ac3c4014328c50402f10bdf59c96a05e86f351 100644 (file)
@@ -487,8 +487,8 @@ static int stmmac_filter_check(struct stmmac_priv *priv)
 
 static int stmmac_test_hfilt(struct stmmac_priv *priv)
 {
-       unsigned char gd_addr[ETH_ALEN] = {0x01, 0x00, 0xcc, 0xcc, 0xdd, 0xdd};
-       unsigned char bd_addr[ETH_ALEN] = {0x09, 0x00, 0xaa, 0xaa, 0xbb, 0xbb};
+       unsigned char gd_addr[ETH_ALEN] = {0x01, 0xee, 0xdd, 0xcc, 0xbb, 0xaa};
+       unsigned char bd_addr[ETH_ALEN] = {0x01, 0x01, 0x02, 0x03, 0x04, 0x05};
        struct stmmac_packet_attrs attr = { };
        int ret;
 
@@ -496,6 +496,9 @@ static int stmmac_test_hfilt(struct stmmac_priv *priv)
        if (ret)
                return ret;
 
+       if (netdev_mc_count(priv->dev) >= priv->hw->multicast_filter_bins)
+               return -EOPNOTSUPP;
+
        ret = dev_mc_add(priv->dev, gd_addr);
        if (ret)
                return ret;
@@ -573,6 +576,8 @@ static int stmmac_test_mcfilt(struct stmmac_priv *priv)
 
        if (stmmac_filter_check(priv))
                return -EOPNOTSUPP;
+       if (!priv->hw->multicast_filter_bins)
+               return -EOPNOTSUPP;
 
        /* Remove all MC addresses */
        __dev_mc_unsync(priv->dev, NULL);
@@ -611,6 +616,8 @@ static int stmmac_test_ucfilt(struct stmmac_priv *priv)
 
        if (stmmac_filter_check(priv))
                return -EOPNOTSUPP;
+       if (!priv->hw->multicast_filter_bins)
+               return -EOPNOTSUPP;
 
        /* Remove all UC addresses */
        __dev_uc_unsync(priv->dev, NULL);
@@ -1564,10 +1571,6 @@ static int __stmmac_test_jumbo(struct stmmac_priv *priv, u16 queue)
        struct stmmac_packet_attrs attr = { };
        int size = priv->dma_buf_sz;
 
-       /* Only XGMAC has SW support for multiple RX descs in same packet */
-       if (priv->plat->has_xgmac)
-               size = priv->dev->max_mtu;
-
        attr.dst = priv->dev->dev_addr;
        attr.max_size = size - ETH_FCS_LEN;
        attr.queue_mapping = queue;
index e231098061b6e842574002c5edd2f48d96e6e71c..f9a9a9d82233375638dacb3c1b3048d11c6efb30 100644 (file)
@@ -510,7 +510,7 @@ static struct stmmac_flow_entry *tc_find_flow(struct stmmac_priv *priv,
        return NULL;
 }
 
-struct {
+static struct {
        int (*fn)(struct stmmac_priv *priv, struct flow_cls_offload *cls,
                  struct stmmac_flow_entry *entry);
 } tc_flow_parsers[] = {
index a65edd2770e66e23bf5faedc9912c64555f3ec52..37ba708ac78136c70df9897999fd8c2c28e96302 100644 (file)
@@ -722,7 +722,7 @@ static void cpdma_chan_set_descs(struct cpdma_ctlr *ctlr,
  * cpdma_chan_split_pool - Splits ctrl pool between all channels.
  * Has to be called under ctlr lock
  */
-int cpdma_chan_split_pool(struct cpdma_ctlr *ctlr)
+static int cpdma_chan_split_pool(struct cpdma_ctlr *ctlr)
 {
        int tx_per_ch_desc = 0, rx_per_ch_desc = 0;
        int free_rx_num = 0, free_tx_num = 0;
index bbbc1dcb6ab5ca169cca257dcabf8705e08b5af0..b517c1af9de052d6d5fcc2d8b249d8544d5b49b9 100644 (file)
@@ -1237,8 +1237,17 @@ static int fjes_probe(struct platform_device *plat_dev)
        adapter->open_guard = false;
 
        adapter->txrx_wq = alloc_workqueue(DRV_NAME "/txrx", WQ_MEM_RECLAIM, 0);
+       if (unlikely(!adapter->txrx_wq)) {
+               err = -ENOMEM;
+               goto err_free_netdev;
+       }
+
        adapter->control_wq = alloc_workqueue(DRV_NAME "/control",
                                              WQ_MEM_RECLAIM, 0);
+       if (unlikely(!adapter->control_wq)) {
+               err = -ENOMEM;
+               goto err_free_txrx_wq;
+       }
 
        INIT_WORK(&adapter->tx_stall_task, fjes_tx_stall_task);
        INIT_WORK(&adapter->raise_intr_rxdata_task,
@@ -1255,7 +1264,7 @@ static int fjes_probe(struct platform_device *plat_dev)
        hw->hw_res.irq = platform_get_irq(plat_dev, 0);
        err = fjes_hw_init(&adapter->hw);
        if (err)
-               goto err_free_netdev;
+               goto err_free_control_wq;
 
        /* setup MAC address (02:00:00:00:00:[epid])*/
        netdev->dev_addr[0] = 2;
@@ -1277,6 +1286,10 @@ static int fjes_probe(struct platform_device *plat_dev)
 
 err_hw_exit:
        fjes_hw_exit(&adapter->hw);
+err_free_control_wq:
+       destroy_workqueue(adapter->control_wq);
+err_free_txrx_wq:
+       destroy_workqueue(adapter->txrx_wq);
 err_free_netdev:
        free_netdev(netdev);
 err_out:
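Note: the fjes_probe() hunks above add the missing NULL checks after alloc_workqueue() and extend the error ladder so each workqueue acquired before a later failure is destroyed on the way out. A self-contained sketch of that goto-unwind pattern follows; the resources and labels are illustrative stand-ins (plain allocations), not the driver's own objects.

/* Sketch: unwind partially acquired resources with goto labels, newest first. */
#include <stdio.h>
#include <stdlib.h>

static int do_probe(void)
{
	int err;
	void *txrx, *control, *hw;

	txrx = malloc(64);		/* stands in for the first workqueue */
	if (!txrx) {
		err = -1;
		goto err_out;
	}

	control = malloc(64);		/* stands in for the second workqueue */
	if (!control) {
		err = -1;
		goto err_free_txrx;
	}

	hw = malloc(64);		/* stands in for fjes_hw_init() */
	if (!hw) {
		err = -1;
		goto err_free_control;	/* later failures release everything above */
	}

	printf("probe ok\n");
	free(hw);
	free(control);
	free(txrx);
	return 0;

err_free_control:
	free(control);
err_free_txrx:
	free(txrx);
err_out:
	return err;
}

int main(void)
{
	return do_probe() ? 1 : 0;
}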
index fbec711ff514af3da11fa9473fe2ef0c1732ba5c..fbea6f232819e905f0fedd832127f4f9ab7a4aba 100644 (file)
@@ -107,27 +107,6 @@ struct bpqdev {
 
 static LIST_HEAD(bpq_devices);
 
-/*
- * bpqether network devices are paired with ethernet devices below them, so
- * form a special "super class" of normal ethernet devices; split their locks
- * off into a separate class since they always nest.
- */
-static struct lock_class_key bpq_netdev_xmit_lock_key;
-static struct lock_class_key bpq_netdev_addr_lock_key;
-
-static void bpq_set_lockdep_class_one(struct net_device *dev,
-                                     struct netdev_queue *txq,
-                                     void *_unused)
-{
-       lockdep_set_class(&txq->_xmit_lock, &bpq_netdev_xmit_lock_key);
-}
-
-static void bpq_set_lockdep_class(struct net_device *dev)
-{
-       lockdep_set_class(&dev->addr_list_lock, &bpq_netdev_addr_lock_key);
-       netdev_for_each_tx_queue(dev, bpq_set_lockdep_class_one, NULL);
-}
-
 /* ------------------------------------------------------------------------ */
 
 
@@ -498,7 +477,6 @@ static int bpq_new_device(struct net_device *edev)
        err = register_netdevice(ndev);
        if (err)
                goto error;
-       bpq_set_lockdep_class(ndev);
 
        /* List protected by RTNL */
        list_add_rcu(&bpq->bpq_list, &bpq_devices);
index 39dddcd8b3cb0129d25c98400cca6692fa8b790c..963509add611ffc1de2f2ebbbfe13f9f58cb5d08 100644 (file)
@@ -982,7 +982,7 @@ static int netvsc_attach(struct net_device *ndev,
        if (netif_running(ndev)) {
                ret = rndis_filter_open(nvdev);
                if (ret)
-                       return ret;
+                       goto err;
 
                rdev = nvdev->extension;
                if (!rdev->link_state)
@@ -990,6 +990,13 @@ static int netvsc_attach(struct net_device *ndev,
        }
 
        return 0;
+
+err:
+       netif_device_detach(ndev);
+
+       rndis_filter_device_remove(hdev, nvdev);
+
+       return ret;
 }
 
 static int netvsc_set_channels(struct net_device *net,
@@ -1807,8 +1814,10 @@ static int netvsc_set_features(struct net_device *ndev,
 
        ret = rndis_filter_set_offload_params(ndev, nvdev, &offloads);
 
-       if (ret)
+       if (ret) {
                features ^= NETIF_F_LRO;
+               ndev->features = features;
+       }
 
 syncvf:
        if (!vf_netdev)
@@ -2335,8 +2344,6 @@ static int netvsc_probe(struct hv_device *dev,
                NETIF_F_HW_VLAN_CTAG_RX;
        net->vlan_features = net->features;
 
-       netdev_lockdep_set_classes(net);
-
        /* MTU range: 68 - 1500 or 65521 */
        net->min_mtu = NETVSC_MTU_MIN;
        if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2)
index ceddb424f887d8d714dbdb25749178dc12ca5275..0dd0ba915ab970cf7a142a57279c9271c22c84a9 100644 (file)
@@ -1137,10 +1137,11 @@ static void atusb_disconnect(struct usb_interface *interface)
 
        ieee802154_unregister_hw(atusb->hw);
 
+       usb_put_dev(atusb->usb_dev);
+
        ieee802154_free_hw(atusb->hw);
 
        usb_set_intfdata(interface, NULL);
-       usb_put_dev(atusb->usb_dev);
 
        pr_debug("%s done\n", __func__);
 }
index 11402dc347db33b66ba45e1ffb60aedaf48c6ae5..430c93786153457a53fbae09f894da4815f73f6b 100644 (file)
@@ -3145,12 +3145,12 @@ static int ca8210_probe(struct spi_device *spi_device)
                goto error;
        }
 
+       priv->spi->dev.platform_data = pdata;
        ret = ca8210_get_platform_data(priv->spi, pdata);
        if (ret) {
                dev_crit(&spi_device->dev, "ca8210_get_platform_data failed\n");
                goto error;
        }
-       priv->spi->dev.platform_data = pdata;
 
        ret = ca8210_dev_com_init(priv);
        if (ret) {
index 17f2300e63eed773832d7bb73985b86d562b9508..8dc04e2590b18b1b41239c4ebf8ac1382942a8d5 100644 (file)
@@ -800,7 +800,7 @@ mcr20a_handle_rx_read_buf_complete(void *context)
        if (!skb)
                return;
 
-       memcpy(skb_put(skb, len), lp->rx_buf, len);
+       __skb_put_data(skb, lp->rx_buf, len);
        ieee802154_rx_irqsafe(lp->hw, skb, lp->rx_lqi[0]);
 
        print_hex_dump_debug("mcr20a rx: ", DUMP_PREFIX_OFFSET, 16, 1,
index 887bbba4631e7ffecd2d5158b0cc91644ba7ad8c..ba3dfac1d90433fc20650ba644990f7a339d3949 100644 (file)
@@ -131,8 +131,6 @@ static int ipvlan_init(struct net_device *dev)
        dev->gso_max_segs = phy_dev->gso_max_segs;
        dev->hard_header_len = phy_dev->hard_header_len;
 
-       netdev_lockdep_set_classes(dev);
-
        ipvlan->pcpu_stats = netdev_alloc_pcpu_stats(struct ipvl_pcpu_stats);
        if (!ipvlan->pcpu_stats)
                return -ENOMEM;
index cb7637364b40d85c9595da833873be1e99db3f7c..afd8b2a082454b57ccf51183e028e552fff1c91f 100644 (file)
@@ -267,7 +267,6 @@ struct macsec_dev {
        struct pcpu_secy_stats __percpu *stats;
        struct list_head secys;
        struct gro_cells gro_cells;
-       unsigned int nest_level;
 };
 
 /**
@@ -2750,7 +2749,6 @@ static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
 
 #define MACSEC_FEATURES \
        (NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST)
-static struct lock_class_key macsec_netdev_addr_lock_key;
 
 static int macsec_dev_init(struct net_device *dev)
 {
@@ -2958,11 +2956,6 @@ static int macsec_get_iflink(const struct net_device *dev)
        return macsec_priv(dev)->real_dev->ifindex;
 }
 
-static int macsec_get_nest_level(struct net_device *dev)
-{
-       return macsec_priv(dev)->nest_level;
-}
-
 static const struct net_device_ops macsec_netdev_ops = {
        .ndo_init               = macsec_dev_init,
        .ndo_uninit             = macsec_dev_uninit,
@@ -2976,7 +2969,6 @@ static const struct net_device_ops macsec_netdev_ops = {
        .ndo_start_xmit         = macsec_start_xmit,
        .ndo_get_stats64        = macsec_get_stats64,
        .ndo_get_iflink         = macsec_get_iflink,
-       .ndo_get_lock_subclass  = macsec_get_nest_level,
 };
 
 static const struct device_type macsec_type = {
@@ -3001,12 +2993,10 @@ static const struct nla_policy macsec_rtnl_policy[IFLA_MACSEC_MAX + 1] = {
 static void macsec_free_netdev(struct net_device *dev)
 {
        struct macsec_dev *macsec = macsec_priv(dev);
-       struct net_device *real_dev = macsec->real_dev;
 
        free_percpu(macsec->stats);
        free_percpu(macsec->secy.tx_sc.stats);
 
-       dev_put(real_dev);
 }
 
 static void macsec_setup(struct net_device *dev)
@@ -3261,14 +3251,6 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
        if (err < 0)
                return err;
 
-       dev_hold(real_dev);
-
-       macsec->nest_level = dev_get_nest_level(real_dev) + 1;
-       netdev_lockdep_set_classes(dev);
-       lockdep_set_class_and_subclass(&dev->addr_list_lock,
-                                      &macsec_netdev_addr_lock_key,
-                                      macsec_get_nest_level(dev));
-
        err = netdev_upper_dev_link(real_dev, dev, extack);
        if (err < 0)
                goto unregister;
index 940192c057b6bdef4e7a0b7d6b7fe5ff2f4f78fa..34fc59bd1e20be52a6ea186a4c24ea94e4eb3882 100644 (file)
@@ -852,8 +852,6 @@ static int macvlan_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
  * "super class" of normal network devices; split their locks off into a
  * separate class since they always nest.
  */
-static struct lock_class_key macvlan_netdev_addr_lock_key;
-
 #define ALWAYS_ON_OFFLOADS \
        (NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_GSO_SOFTWARE | \
         NETIF_F_GSO_ROBUST | NETIF_F_GSO_ENCAP_ALL)
@@ -869,19 +867,6 @@ static struct lock_class_key macvlan_netdev_addr_lock_key;
 #define MACVLAN_STATE_MASK \
        ((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT))
 
-static int macvlan_get_nest_level(struct net_device *dev)
-{
-       return ((struct macvlan_dev *)netdev_priv(dev))->nest_level;
-}
-
-static void macvlan_set_lockdep_class(struct net_device *dev)
-{
-       netdev_lockdep_set_classes(dev);
-       lockdep_set_class_and_subclass(&dev->addr_list_lock,
-                                      &macvlan_netdev_addr_lock_key,
-                                      macvlan_get_nest_level(dev));
-}
-
 static int macvlan_init(struct net_device *dev)
 {
        struct macvlan_dev *vlan = netdev_priv(dev);
@@ -900,8 +885,6 @@ static int macvlan_init(struct net_device *dev)
        dev->gso_max_segs       = lowerdev->gso_max_segs;
        dev->hard_header_len    = lowerdev->hard_header_len;
 
-       macvlan_set_lockdep_class(dev);
-
        vlan->pcpu_stats = netdev_alloc_pcpu_stats(struct vlan_pcpu_stats);
        if (!vlan->pcpu_stats)
                return -ENOMEM;
@@ -1161,7 +1144,6 @@ static const struct net_device_ops macvlan_netdev_ops = {
        .ndo_fdb_add            = macvlan_fdb_add,
        .ndo_fdb_del            = macvlan_fdb_del,
        .ndo_fdb_dump           = ndo_dflt_fdb_dump,
-       .ndo_get_lock_subclass  = macvlan_get_nest_level,
 #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = macvlan_dev_poll_controller,
        .ndo_netpoll_setup      = macvlan_dev_netpoll_setup,
@@ -1445,7 +1427,6 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
        vlan->dev      = dev;
        vlan->port     = port;
        vlan->set_features = MACVLAN_FEATURES;
-       vlan->nest_level = dev_get_nest_level(lowerdev) + 1;
 
        vlan->mode     = MACVLAN_MODE_VEPA;
        if (data && data[IFLA_MACVLAN_MODE])
index 56576d4f34a5e0d9f28734a1ab0c47be2ddd8fb2..54ca6681ba3182fa758f3eb1a6c768726ec92cb7 100644 (file)
@@ -806,9 +806,11 @@ static void nsim_dev_port_del_all(struct nsim_dev *nsim_dev)
 {
        struct nsim_dev_port *nsim_dev_port, *tmp;
 
+       mutex_lock(&nsim_dev->port_list_lock);
        list_for_each_entry_safe(nsim_dev_port, tmp,
                                 &nsim_dev->port_list, list)
                __nsim_dev_port_del(nsim_dev_port);
+       mutex_unlock(&nsim_dev->port_list_lock);
 }
 
 int nsim_dev_probe(struct nsim_bus_dev *nsim_bus_dev)
@@ -822,14 +824,17 @@ int nsim_dev_probe(struct nsim_bus_dev *nsim_bus_dev)
                return PTR_ERR(nsim_dev);
        dev_set_drvdata(&nsim_bus_dev->dev, nsim_dev);
 
+       mutex_lock(&nsim_dev->port_list_lock);
        for (i = 0; i < nsim_bus_dev->port_count; i++) {
                err = __nsim_dev_port_add(nsim_dev, i);
                if (err)
                        goto err_port_del_all;
        }
+       mutex_unlock(&nsim_dev->port_list_lock);
        return 0;
 
 err_port_del_all:
+       mutex_unlock(&nsim_dev->port_list_lock);
        nsim_dev_port_del_all(nsim_dev);
        nsim_dev_destroy(nsim_dev);
        return err;
index f61d094746c069bb7ac1c6db9eb77fe0820d2562..1a251f76d09b42082f2a9c5176314d256fe211b8 100644 (file)
@@ -241,8 +241,8 @@ static struct pernet_operations nsim_fib_net_ops = {
 
 void nsim_fib_exit(void)
 {
-       unregister_pernet_subsys(&nsim_fib_net_ops);
        unregister_fib_notifier(&nsim_fib_nb);
+       unregister_pernet_subsys(&nsim_fib_net_ops);
 }
 
 int nsim_fib_init(void)
@@ -258,6 +258,7 @@ int nsim_fib_init(void)
        err = register_fib_notifier(&nsim_fib_nb, nsim_fib_dump_inconsistent);
        if (err < 0) {
                pr_err("Failed to register fib notifier\n");
+               unregister_pernet_subsys(&nsim_fib_net_ops);
                goto err_out;
        }
 
index 2aa7b2e600464fb89383d508940d08f0c2935c2a..1eb5d4fb8925fbf091f1349c76b5faea199e9373 100644 (file)
 #include <linux/of_gpio.h>
 #include <linux/gpio/consumer.h>
 
+#define AT803X_SPECIFIC_STATUS                 0x11
+#define AT803X_SS_SPEED_MASK                   (3 << 14)
+#define AT803X_SS_SPEED_1000                   (2 << 14)
+#define AT803X_SS_SPEED_100                    (1 << 14)
+#define AT803X_SS_SPEED_10                     (0 << 14)
+#define AT803X_SS_DUPLEX                       BIT(13)
+#define AT803X_SS_SPEED_DUPLEX_RESOLVED                BIT(11)
+#define AT803X_SS_MDIX                         BIT(6)
+
 #define AT803X_INTR_ENABLE                     0x12
 #define AT803X_INTR_ENABLE_AUTONEG_ERR         BIT(15)
 #define AT803X_INTR_ENABLE_SPEED_CHANGED       BIT(14)
@@ -357,6 +366,64 @@ static int at803x_aneg_done(struct phy_device *phydev)
        return aneg_done;
 }
 
+static int at803x_read_status(struct phy_device *phydev)
+{
+       int ss, err, old_link = phydev->link;
+
+       /* Update the link, but return if there was an error */
+       err = genphy_update_link(phydev);
+       if (err)
+               return err;
+
+       /* why bother the PHY if nothing can have changed */
+       if (phydev->autoneg == AUTONEG_ENABLE && old_link && phydev->link)
+               return 0;
+
+       phydev->speed = SPEED_UNKNOWN;
+       phydev->duplex = DUPLEX_UNKNOWN;
+       phydev->pause = 0;
+       phydev->asym_pause = 0;
+
+       err = genphy_read_lpa(phydev);
+       if (err < 0)
+               return err;
+
+       /* Read the AT8035 PHY-Specific Status register, which indicates the
+        * speed and duplex that the PHY is actually using, irrespective of
+        * whether we are in autoneg mode or not.
+        */
+       ss = phy_read(phydev, AT803X_SPECIFIC_STATUS);
+       if (ss < 0)
+               return ss;
+
+       if (ss & AT803X_SS_SPEED_DUPLEX_RESOLVED) {
+               switch (ss & AT803X_SS_SPEED_MASK) {
+               case AT803X_SS_SPEED_10:
+                       phydev->speed = SPEED_10;
+                       break;
+               case AT803X_SS_SPEED_100:
+                       phydev->speed = SPEED_100;
+                       break;
+               case AT803X_SS_SPEED_1000:
+                       phydev->speed = SPEED_1000;
+                       break;
+               }
+               if (ss & AT803X_SS_DUPLEX)
+                       phydev->duplex = DUPLEX_FULL;
+               else
+                       phydev->duplex = DUPLEX_HALF;
+               if (ss & AT803X_SS_MDIX)
+                       phydev->mdix = ETH_TP_MDI_X;
+               else
+                       phydev->mdix = ETH_TP_MDI;
+       }
+
+       if (phydev->autoneg == AUTONEG_ENABLE && phydev->autoneg_complete)
+               phy_resolve_aneg_pause(phydev);
+
+       return 0;
+}
+
 static struct phy_driver at803x_driver[] = {
 {
        /* ATHEROS 8035 */
@@ -370,6 +437,7 @@ static struct phy_driver at803x_driver[] = {
        .suspend                = at803x_suspend,
        .resume                 = at803x_resume,
        /* PHY_GBIT_FEATURES */
+       .read_status            = at803x_read_status,
        .ack_interrupt          = at803x_ack_interrupt,
        .config_intr            = at803x_config_intr,
 }, {
@@ -399,6 +467,7 @@ static struct phy_driver at803x_driver[] = {
        .suspend                = at803x_suspend,
        .resume                 = at803x_resume,
        /* PHY_GBIT_FEATURES */
+       .read_status            = at803x_read_status,
        .aneg_done              = at803x_aneg_done,
        .ack_interrupt          = &at803x_ack_interrupt,
        .config_intr            = &at803x_config_intr,
index 8fc33867e524f865261b68f8c2f74ab0d8c6d660..af8eabe7a6d4438876877b634f6a51f47449f691 100644 (file)
@@ -572,6 +572,7 @@ static int bcm7xxx_28nm_probe(struct phy_device *phydev)
        .name           = _name,                                        \
        /* PHY_BASIC_FEATURES */                                        \
        .flags          = PHY_IS_INTERNAL,                              \
+       .soft_reset     = genphy_soft_reset,                            \
        .config_init    = bcm7xxx_config_init,                          \
        .suspend        = bcm7xxx_suspend,                              \
        .resume         = bcm7xxx_config_init,                          \
index e282600bd83e269e7d91973599e7b7f0329da12e..c1d345c3cab358f1ca8538ade19c1924708b33ee 100644 (file)
@@ -121,7 +121,7 @@ void mdio_device_reset(struct mdio_device *mdiodev, int value)
                return;
 
        if (mdiodev->reset_gpio)
-               gpiod_set_value(mdiodev->reset_gpio, value);
+               gpiod_set_value_cansleep(mdiodev->reset_gpio, value);
 
        if (mdiodev->reset_ctrl) {
                if (value)
index 2fea5541c35a8a02910a09bbbaa0de7b9fcbc6c1..63dedec0433def38409d22df1b8db08ff2954005 100644 (file)
@@ -341,6 +341,35 @@ static int ksz8041_config_aneg(struct phy_device *phydev)
        return genphy_config_aneg(phydev);
 }
 
+static int ksz8051_ksz8795_match_phy_device(struct phy_device *phydev,
+                                           const u32 ksz_phy_id)
+{
+       int ret;
+
+       if ((phydev->phy_id & MICREL_PHY_ID_MASK) != ksz_phy_id)
+               return 0;
+
+       ret = phy_read(phydev, MII_BMSR);
+       if (ret < 0)
+               return ret;
+
+       /* KSZ8051 PHY and KSZ8794/KSZ8795/KSZ8765 switch share the same
+        * exact PHY ID. However, they can be told apart by the extended
+        * capability registers presence. The KSZ8051 PHY has them while
+        * the switch does not.
+        */
+       ret &= BMSR_ERCAP;
+       if (ksz_phy_id == PHY_ID_KSZ8051)
+               return ret;
+       else
+               return !ret;
+}
+
+static int ksz8051_match_phy_device(struct phy_device *phydev)
+{
+       return ksz8051_ksz8795_match_phy_device(phydev, PHY_ID_KSZ8051);
+}
+
 static int ksz8081_config_init(struct phy_device *phydev)
 {
        /* KSZPHY_OMSO_FACTORY_TEST is set at de-assertion of the reset line
@@ -364,6 +393,11 @@ static int ksz8061_config_init(struct phy_device *phydev)
        return kszphy_config_init(phydev);
 }
 
+static int ksz8795_match_phy_device(struct phy_device *phydev)
+{
+       return ksz8051_ksz8795_match_phy_device(phydev, PHY_ID_KSZ87XX);
+}
+
 static int ksz9021_load_values_from_of(struct phy_device *phydev,
                                       const struct device_node *of_node,
                                       u16 reg,
@@ -1017,8 +1051,6 @@ static struct phy_driver ksphy_driver[] = {
        .suspend        = genphy_suspend,
        .resume         = genphy_resume,
 }, {
-       .phy_id         = PHY_ID_KSZ8051,
-       .phy_id_mask    = MICREL_PHY_ID_MASK,
        .name           = "Micrel KSZ8051",
        /* PHY_BASIC_FEATURES */
        .driver_data    = &ksz8051_type,
@@ -1029,6 +1061,7 @@ static struct phy_driver ksphy_driver[] = {
        .get_sset_count = kszphy_get_sset_count,
        .get_strings    = kszphy_get_strings,
        .get_stats      = kszphy_get_stats,
+       .match_phy_device = ksz8051_match_phy_device,
        .suspend        = genphy_suspend,
        .resume         = genphy_resume,
 }, {
@@ -1141,13 +1174,12 @@ static struct phy_driver ksphy_driver[] = {
        .suspend        = genphy_suspend,
        .resume         = genphy_resume,
 }, {
-       .phy_id         = PHY_ID_KSZ8795,
-       .phy_id_mask    = MICREL_PHY_ID_MASK,
-       .name           = "Micrel KSZ8795",
+       .name           = "Micrel KSZ87XX Switch",
        /* PHY_BASIC_FEATURES */
        .config_init    = kszphy_config_init,
        .config_aneg    = ksz8873mll_config_aneg,
        .read_status    = ksz8873mll_read_status,
+       .match_phy_device = ksz8795_match_phy_device,
        .suspend        = genphy_suspend,
        .resume         = genphy_resume,
 }, {
index 7935593debb11eaeec20a931350d64dc0746b82d..a1caeee1223617dab21b488858b14b5d0ef2aa2a 100644 (file)
@@ -323,6 +323,8 @@ int genphy_c45_read_pma(struct phy_device *phydev)
 {
        int val;
 
+       linkmode_zero(phydev->lp_advertising);
+
        val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_CTRL1);
        if (val < 0)
                return val;
index 369903d9b6eceba076ba295b80a28fed700297ed..9412669b579c7d1292f41ab32fac5197ecf5778d 100644 (file)
@@ -283,6 +283,18 @@ void of_set_phy_eee_broken(struct phy_device *phydev)
        phydev->eee_broken_modes = broken;
 }
 
+void phy_resolve_aneg_pause(struct phy_device *phydev)
+{
+       if (phydev->duplex == DUPLEX_FULL) {
+               phydev->pause = linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+                                                 phydev->lp_advertising);
+               phydev->asym_pause = linkmode_test_bit(
+                       ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+                       phydev->lp_advertising);
+       }
+}
+EXPORT_SYMBOL_GPL(phy_resolve_aneg_pause);
+
 /**
  * phy_resolve_aneg_linkmode - resolve the advertisements into phy settings
  * @phydev: The phy_device struct
@@ -305,13 +317,7 @@ void phy_resolve_aneg_linkmode(struct phy_device *phydev)
                        break;
                }
 
-       if (phydev->duplex == DUPLEX_FULL) {
-               phydev->pause = linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT,
-                                                 phydev->lp_advertising);
-               phydev->asym_pause = linkmode_test_bit(
-                       ETHTOOL_LINK_MODE_Asym_Pause_BIT,
-                       phydev->lp_advertising);
-       }
+       phy_resolve_aneg_pause(phydev);
 }
 EXPORT_SYMBOL_GPL(phy_resolve_aneg_linkmode);
 
index 7c92afd36bbeb98f1f372b67b335f00a0da1772c..105d389b58e74512da4e424af100ba93855c0548 100644 (file)
@@ -457,6 +457,11 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
                                                           val);
                                change_autoneg = true;
                                break;
+                       case MII_CTRL1000:
+                               mii_ctrl1000_mod_linkmode_adv_t(phydev->advertising,
+                                                               val);
+                               change_autoneg = true;
+                               break;
                        default:
                                /* do nothing */
                                break;
@@ -567,9 +572,6 @@ int phy_start_aneg(struct phy_device *phydev)
        if (AUTONEG_DISABLE == phydev->autoneg)
                phy_sanitize_settings(phydev);
 
-       /* Invalidate LP advertising flags */
-       linkmode_zero(phydev->lp_advertising);
-
        err = phy_config_aneg(phydev);
        if (err < 0)
                goto out_unlock;
index d347ddcac45bd9b188e62825bfa541e5a7f33217..adb66a2fae18b3e54e671e497635ffcc2db803ba 100644 (file)
@@ -1783,34 +1783,18 @@ done:
 }
 EXPORT_SYMBOL(genphy_update_link);
 
-/**
- * genphy_read_status - check the link status and update current link state
- * @phydev: target phy_device struct
- *
- * Description: Check the link, then figure out the current state
- *   by comparing what we advertise with what the link partner
- *   advertises.  Start by checking the gigabit possibilities,
- *   then move on to 10/100.
- */
-int genphy_read_status(struct phy_device *phydev)
+int genphy_read_lpa(struct phy_device *phydev)
 {
-       int lpa, lpagb, err, old_link = phydev->link;
-
-       /* Update the link, but return if there was an error */
-       err = genphy_update_link(phydev);
-       if (err)
-               return err;
-
-       /* why bother the PHY if nothing can have changed */
-       if (phydev->autoneg == AUTONEG_ENABLE && old_link && phydev->link)
-               return 0;
+       int lpa, lpagb;
 
-       phydev->speed = SPEED_UNKNOWN;
-       phydev->duplex = DUPLEX_UNKNOWN;
-       phydev->pause = 0;
-       phydev->asym_pause = 0;
+       if (phydev->autoneg == AUTONEG_ENABLE) {
+               if (!phydev->autoneg_complete) {
+                       mii_stat1000_mod_linkmode_lpa_t(phydev->lp_advertising,
+                                                       0);
+                       mii_lpa_mod_linkmode_lpa_t(phydev->lp_advertising, 0);
+                       return 0;
+               }
 
-       if (phydev->autoneg == AUTONEG_ENABLE && phydev->autoneg_complete) {
                if (phydev->is_gigabit_capable) {
                        lpagb = phy_read(phydev, MII_STAT1000);
                        if (lpagb < 0)
@@ -1838,6 +1822,46 @@ int genphy_read_status(struct phy_device *phydev)
                        return lpa;
 
                mii_lpa_mod_linkmode_lpa_t(phydev->lp_advertising, lpa);
+       } else {
+               linkmode_zero(phydev->lp_advertising);
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL(genphy_read_lpa);
+
+/**
+ * genphy_read_status - check the link status and update current link state
+ * @phydev: target phy_device struct
+ *
+ * Description: Check the link, then figure out the current state
+ *   by comparing what we advertise with what the link partner
+ *   advertises.  Start by checking the gigabit possibilities,
+ *   then move on to 10/100.
+ */
+int genphy_read_status(struct phy_device *phydev)
+{
+       int err, old_link = phydev->link;
+
+       /* Update the link, but return if there was an error */
+       err = genphy_update_link(phydev);
+       if (err)
+               return err;
+
+       /* why bother the PHY if nothing can have changed */
+       if (phydev->autoneg == AUTONEG_ENABLE && old_link && phydev->link)
+               return 0;
+
+       phydev->speed = SPEED_UNKNOWN;
+       phydev->duplex = DUPLEX_UNKNOWN;
+       phydev->pause = 0;
+       phydev->asym_pause = 0;
+
+       err = genphy_read_lpa(phydev);
+       if (err < 0)
+               return err;
+
+       if (phydev->autoneg == AUTONEG_ENABLE && phydev->autoneg_complete) {
                phy_resolve_aneg_linkmode(phydev);
        } else if (phydev->autoneg == AUTONEG_DISABLE) {
                int bmcr = phy_read(phydev, MII_BMCR);
index a5a57ca94c1ac541092867b685fe7d616218f96b..a578f7ebf715e69680b23e35c1aec90d33f200f4 100644 (file)
@@ -87,8 +87,24 @@ struct phylink {
        phylink_printk(KERN_WARNING, pl, fmt, ##__VA_ARGS__)
 #define phylink_info(pl, fmt, ...) \
        phylink_printk(KERN_INFO, pl, fmt, ##__VA_ARGS__)
+#if defined(CONFIG_DYNAMIC_DEBUG)
 #define phylink_dbg(pl, fmt, ...) \
+do {                                                                   \
+       if ((pl)->config->type == PHYLINK_NETDEV)                       \
+               netdev_dbg((pl)->netdev, fmt, ##__VA_ARGS__);           \
+       else if ((pl)->config->type == PHYLINK_DEV)                     \
+               dev_dbg((pl)->dev, fmt, ##__VA_ARGS__);                 \
+} while (0)
+#elif defined(DEBUG)
+#define phylink_dbg(pl, fmt, ...)                                      \
        phylink_printk(KERN_DEBUG, pl, fmt, ##__VA_ARGS__)
+#else
+#define phylink_dbg(pl, fmt, ...)                                      \
+({                                                                     \
+       if (0)                                                          \
+               phylink_printk(KERN_DEBUG, pl, fmt, ##__VA_ARGS__);     \
+})
+#endif
 
 /**
  * phylink_set_port_modes() - set the port type modes in the ethtool mask
@@ -576,7 +592,7 @@ static int phylink_register_sfp(struct phylink *pl,
 
 /**
  * phylink_create() - create a phylink instance
- * @ndev: a pointer to the &struct net_device
+ * @config: a pointer to the target &struct phylink_config
  * @fwnode: a pointer to a &struct fwnode_handle describing the network
  *     interface
  * @iface: the desired link mode defined by &typedef phy_interface_t
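Note: the phylink hunk above defines phylink_dbg() three ways: routed through netdev_dbg()/dev_dbg() under CONFIG_DYNAMIC_DEBUG, printed directly when DEBUG is set, and otherwise wrapped in an `if (0)` so no code is emitted while the arguments still get type-checked. A standalone sketch of that last, compile-away form follows; the macro and program are illustrative only, and the ##__VA_ARGS__ form is the GNU extension the kernel itself relies on.

/* Sketch: a debug macro that compiles away while keeping format checking. */
#include <stdio.h>

#ifdef DEBUG
#define dbg(fmt, ...) fprintf(stderr, "dbg: " fmt, ##__VA_ARGS__)
#else
#define dbg(fmt, ...)						\
	do {							\
		if (0)						\
			fprintf(stderr, "dbg: " fmt, ##__VA_ARGS__); \
	} while (0)
#endif

int main(void)
{
	int queues = 4;

	/* With DEBUG unset this prints nothing and typically emits no code,
	 * but a mismatched format string is still caught at compile time. */
	dbg("configured %d queues\n", queues);
	return 0;
}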
index dc3d92d340c4d7a019c53706c96b0513cee13356..b732982507939563d08a49f7c6ec22d86f1b0dcf 100644 (file)
@@ -327,6 +327,7 @@ static struct phy_driver smsc_phy_driver[] = {
        .name           = "SMSC LAN8740",
 
        /* PHY_BASIC_FEATURES */
+       .flags          = PHY_RST_AFTER_CLK_EN,
 
        .probe          = smsc_phy_probe,
 
index 9a1b006904a7dfa3ec5aa98c7cfd6b9a6d9c5a34..61824bbb55887232ce61c0d0b5f5bc304274ed3e 100644 (file)
@@ -1324,8 +1324,6 @@ static int ppp_dev_init(struct net_device *dev)
 {
        struct ppp *ppp;
 
-       netdev_lockdep_set_classes(dev);
-
        ppp = netdev_priv(dev);
        /* Let the netdevice take a reference on the ppp file. This ensures
         * that ppp_destroy_interface() won't run before the device gets
index 734de7de03f7893158e1370056f5193f82b7bd54..e1fabb3e3246f351dd34de0ae51e6d386148e1a1 100644 (file)
@@ -238,7 +238,7 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
        skb_dst_drop(skb);
        skb_dst_set(skb, &rt->dst);
 
-       nf_reset(skb);
+       nf_reset_ct(skb);
 
        skb->ip_summed = CHECKSUM_NONE;
        ip_select_ident(net, skb, NULL);
@@ -358,7 +358,7 @@ static int pptp_rcv(struct sk_buff *skb)
        po = lookup_chan(htons(header->call_id), iph->saddr);
        if (po) {
                skb_dst_drop(skb);
-               nf_reset(skb);
+               nf_reset_ct(skb);
                return sk_receive_skb(sk_pppox(po), skb, 0);
        }
 drop:
index e8089def5a4671bad10efbde7b00297e4c37df69..8156b33ee3e7919188127c9ed7e732d89501eb66 100644 (file)
@@ -1615,7 +1615,6 @@ static int team_init(struct net_device *dev)
        int err;
 
        team->dev = dev;
-       mutex_init(&team->lock);
        team_set_no_mode(team);
 
        team->pcpu_stats = netdev_alloc_pcpu_stats(struct team_pcpu_stats);
@@ -1642,7 +1641,8 @@ static int team_init(struct net_device *dev)
                goto err_options_register;
        netif_carrier_off(dev);
 
-       netdev_lockdep_set_classes(dev);
+       lockdep_register_key(&team->team_lock_key);
+       __mutex_init(&team->lock, "team->team_lock_key", &team->team_lock_key);
 
        return 0;
 
@@ -1673,6 +1673,7 @@ static void team_uninit(struct net_device *dev)
        team_queue_override_fini(team);
        mutex_unlock(&team->lock);
        netdev_change_features(dev);
+       lockdep_unregister_key(&team->team_lock_key);
 }
 
 static void team_destructor(struct net_device *dev)
@@ -1976,8 +1977,15 @@ static int team_del_slave(struct net_device *dev, struct net_device *port_dev)
        err = team_port_del(team, port_dev);
        mutex_unlock(&team->lock);
 
-       if (!err)
-               netdev_change_features(dev);
+       if (err)
+               return err;
+
+       if (netif_is_team_master(port_dev)) {
+               lockdep_unregister_key(&team->team_lock_key);
+               lockdep_register_key(&team->team_lock_key);
+               lockdep_set_class(&team->lock, &team->team_lock_key);
+       }
+       netdev_change_features(dev);
 
        return err;
 }
index aab0be40d4430ab1c1fed92782f4a263f38020bd..a8d3141582a53caf407dc9aff61c452998de068f 100644 (file)
@@ -526,8 +526,8 @@ static void tun_flow_update(struct tun_struct *tun, u32 rxhash,
        e = tun_flow_find(head, rxhash);
        if (likely(e)) {
                /* TODO: keep queueing to old queue until it's empty? */
-               if (e->queue_index != queue_index)
-                       e->queue_index = queue_index;
+               if (READ_ONCE(e->queue_index) != queue_index)
+                       WRITE_ONCE(e->queue_index, queue_index);
                if (e->updated != jiffies)
                        e->updated = jiffies;
                sock_rps_record_flow_hash(e->rps_rxhash);
@@ -1104,7 +1104,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
         */
        skb_orphan(skb);
 
-       nf_reset(skb);
+       nf_reset_ct(skb);
 
        if (ptr_ring_produce(&tfile->tx_ring, skb))
                goto drop;
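Note: in tun_flow_update() the flow entry's queue_index is now accessed through READ_ONCE()/WRITE_ONCE(), since other paths can look at the field while this update runs and the plain accesses formed a data race. In portable userspace C the nearest analog is a relaxed C11 atomic access; the sketch below uses that, with illustrative thread and field names, and is only an approximation of the kernel macros.

/* Sketch: mark a deliberately lockless field with relaxed atomics, roughly
 * the userspace counterpart of the kernel's READ_ONCE()/WRITE_ONCE().
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct flow_entry {
	_Atomic unsigned int queue_index;
};

static struct flow_entry entry;

static void *update_flow(void *arg)
{
	unsigned int queue = 3;

	(void)arg;
	/* Store only when the value actually changes, as the hunk does. */
	if (atomic_load_explicit(&entry.queue_index, memory_order_relaxed) != queue)
		atomic_store_explicit(&entry.queue_index, queue, memory_order_relaxed);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, update_flow, NULL);
	pthread_join(t, NULL);
	printf("queue_index = %u\n",
	       atomic_load_explicit(&entry.queue_index, memory_order_relaxed));
	return 0;
}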
index 32f53de5b1fe7abff0eb2882240bcd295977af08..fe630438f67b040e28e81675cb54a44b863acb83 100644 (file)
@@ -787,6 +787,13 @@ static const struct usb_device_id  products[] = {
        .driver_info = 0,
 },
 
+/* ThinkPad USB-C Dock Gen 2 (based on Realtek RTL8153) */
+{
+       USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0xa387, USB_CLASS_COMM,
+                       USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+       .driver_info = 0,
+},
+
 /* NVIDIA Tegra USB 3.0 Ethernet Adapters (based on Realtek RTL8153) */
 {
        USB_DEVICE_AND_INTERFACE_INFO(NVIDIA_VENDOR_ID, 0x09ff, USB_CLASS_COMM,
index ce78714f536f77ca4054baa265e86cca6ae0ee8d..74849da031fab8f4944751b0fc78431fdfbbc8c8 100644 (file)
@@ -186,7 +186,7 @@ struct hso_tiocmget {
        int    intr_completed;
        struct usb_endpoint_descriptor *endp;
        struct urb *urb;
-       struct hso_serial_state_notification serial_state_notification;
+       struct hso_serial_state_notification *serial_state_notification;
        u16    prev_UART_state_bitmap;
        struct uart_icount icount;
 };
@@ -1432,7 +1432,7 @@ static int tiocmget_submit_urb(struct hso_serial *serial,
                         usb_rcvintpipe(usb,
                                        tiocmget->endp->
                                        bEndpointAddress & 0x7F),
-                        &tiocmget->serial_state_notification,
+                        tiocmget->serial_state_notification,
                         sizeof(struct hso_serial_state_notification),
                         tiocmget_intr_callback, serial,
                         tiocmget->endp->bInterval);
@@ -1479,7 +1479,7 @@ static void tiocmget_intr_callback(struct urb *urb)
        /* wIndex should be the USB interface number of the port to which the
         * notification applies, which should always be the Modem port.
         */
-       serial_state_notification = &tiocmget->serial_state_notification;
+       serial_state_notification = tiocmget->serial_state_notification;
        if (serial_state_notification->bmRequestType != BM_REQUEST_TYPE ||
            serial_state_notification->bNotification != B_NOTIFICATION ||
            le16_to_cpu(serial_state_notification->wValue) != W_VALUE ||
@@ -2565,6 +2565,8 @@ static void hso_free_tiomget(struct hso_serial *serial)
                usb_free_urb(tiocmget->urb);
                tiocmget->urb = NULL;
                serial->tiocmget = NULL;
+               kfree(tiocmget->serial_state_notification);
+               tiocmget->serial_state_notification = NULL;
                kfree(tiocmget);
        }
 }
@@ -2615,19 +2617,26 @@ static struct hso_device *hso_create_bulk_serial_device(
                num_urbs = 2;
                serial->tiocmget = kzalloc(sizeof(struct hso_tiocmget),
                                           GFP_KERNEL);
+               serial->tiocmget->serial_state_notification
+                       = kzalloc(sizeof(struct hso_serial_state_notification),
+                                          GFP_KERNEL);
                /* it isn't going to break our heart if serial->tiocmget
                 *  allocation fails don't bother checking this.
                 */
-               if (serial->tiocmget) {
+               if (serial->tiocmget && serial->tiocmget->serial_state_notification) {
                        tiocmget = serial->tiocmget;
+                       tiocmget->endp = hso_get_ep(interface,
+                                                   USB_ENDPOINT_XFER_INT,
+                                                   USB_DIR_IN);
+                       if (!tiocmget->endp) {
+                               dev_err(&interface->dev, "Failed to find INT IN ep\n");
+                               goto exit;
+                       }
+
                        tiocmget->urb = usb_alloc_urb(0, GFP_KERNEL);
                        if (tiocmget->urb) {
                                mutex_init(&tiocmget->mutex);
                                init_waitqueue_head(&tiocmget->waitq);
-                               tiocmget->endp = hso_get_ep(
-                                       interface,
-                                       USB_ENDPOINT_XFER_INT,
-                                       USB_DIR_IN);
                        } else
                                hso_free_tiomget(serial);
                }
index 58f5a219fb65cdc8417ee9b19a1660b82f6f56c4..f24a1b0b801f032aec74d910c9e13ecec3559f80 100644 (file)
@@ -1264,8 +1264,11 @@ static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
                netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
                lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
 
-               if (dev->domain_data.phyirq > 0)
+               if (dev->domain_data.phyirq > 0) {
+                       local_irq_disable();
                        generic_handle_irq(dev->domain_data.phyirq);
+                       local_irq_enable();
+               }
        } else
                netdev_warn(dev->net,
                            "unexpected interrupt: 0x%08x\n", intdata);
@@ -3782,10 +3785,14 @@ static int lan78xx_probe(struct usb_interface *intf,
        /* driver requires remote-wakeup capability during autosuspend. */
        intf->needs_remote_wakeup = 1;
 
+       ret = lan78xx_phy_init(dev);
+       if (ret < 0)
+               goto out4;
+
        ret = register_netdev(netdev);
        if (ret != 0) {
                netif_err(dev, probe, netdev, "couldn't register the device\n");
-               goto out4;
+               goto out5;
        }
 
        usb_set_intfdata(intf, dev);
@@ -3798,14 +3805,10 @@ static int lan78xx_probe(struct usb_interface *intf,
        pm_runtime_set_autosuspend_delay(&udev->dev,
                                         DEFAULT_AUTOSUSPEND_DELAY);
 
-       ret = lan78xx_phy_init(dev);
-       if (ret < 0)
-               goto out5;
-
        return 0;
 
 out5:
-       unregister_netdev(netdev);
+       phy_disconnect(netdev->phydev);
 out4:
        usb_free_urb(dev->urb_intr);
 out3:
index b6dc5d714b5e636c5b39fa199eb7fd64c1c125bd..596428ec71df6742d8b150e6c7076a79f477af94 100644 (file)
@@ -1327,6 +1327,7 @@ static const struct usb_device_id products[] = {
        {QMI_FIXED_INTF(0x2357, 0x0201, 4)},    /* TP-LINK HSUPA Modem MA180 */
        {QMI_FIXED_INTF(0x2357, 0x9000, 4)},    /* TP-LINK MA260 */
        {QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)}, /* Telit LE922A */
+       {QMI_QUIRK_SET_DTR(0x1bc7, 0x1050, 2)}, /* Telit FN980 */
        {QMI_FIXED_INTF(0x1bc7, 0x1100, 3)},    /* Telit ME910 */
        {QMI_FIXED_INTF(0x1bc7, 0x1101, 3)},    /* Telit ME910 dual modem */
        {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)},    /* Telit LE920 */
@@ -1350,6 +1351,7 @@ static const struct usb_device_id products[] = {
        {QMI_FIXED_INTF(0x1e2d, 0x0082, 4)},    /* Cinterion PHxx,PXxx (2 RmNet) */
        {QMI_FIXED_INTF(0x1e2d, 0x0082, 5)},    /* Cinterion PHxx,PXxx (2 RmNet) */
        {QMI_FIXED_INTF(0x1e2d, 0x0083, 4)},    /* Cinterion PHxx,PXxx (1 RmNet + USB Audio)*/
+       {QMI_QUIRK_SET_DTR(0x1e2d, 0x00b0, 4)}, /* Cinterion CLS8 */
        {QMI_FIXED_INTF(0x413c, 0x81a2, 8)},    /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */
        {QMI_FIXED_INTF(0x413c, 0x81a3, 8)},    /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */
        {QMI_FIXED_INTF(0x413c, 0x81a4, 8)},    /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
index 08726090570e192b40b106892e9fc4e625534cd7..d4a95b50bda6b22004472e74fcb7bd2faa4e55d9 100644 (file)
@@ -4799,10 +4799,9 @@ static int rtl8152_reset_resume(struct usb_interface *intf)
        struct r8152 *tp = usb_get_intfdata(intf);
 
        clear_bit(SELECTIVE_SUSPEND, &tp->flags);
-       mutex_lock(&tp->control);
        tp->rtl_ops.init(tp);
        queue_delayed_work(system_long_wq, &tp->hw_phy_work, 0);
-       mutex_unlock(&tp->control);
+       set_ethernet_addr(tp);
        return rtl8152_resume(intf);
 }
 
@@ -5756,6 +5755,7 @@ static const struct usb_device_id rtl8152_table[] = {
        {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO,  0x7205)},
        {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO,  0x720c)},
        {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO,  0x7214)},
+       {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO,  0xa387)},
        {REALTEK_USB_DEVICE(VENDOR_ID_LINKSYS, 0x0041)},
        {REALTEK_USB_DEVICE(VENDOR_ID_NVIDIA,  0x09ff)},
        {REALTEK_USB_DEVICE(VENDOR_ID_TPLINK,  0x0601)},
index c5d4a0060124cf238543b3c9f3f9de603749a708..681e0def6356ba8d4ea6a3c0d9982d09f3c9973b 100644 (file)
@@ -335,7 +335,7 @@ static void sr_set_multicast(struct net_device *net)
 static int sr_mdio_read(struct net_device *net, int phy_id, int loc)
 {
        struct usbnet *dev = netdev_priv(net);
-       __le16 res;
+       __le16 res = 0;
 
        mutex_lock(&dev->phy_mutex);
        sr_set_sw_mii(dev);
index ba98e0971b842df0a6e2c690773eb3ee32986b47..5a635f028bdcffb33563f51ad483f86afd3e0fbf 100644 (file)
@@ -1585,7 +1585,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
        /* Don't wait up for transmitted skbs to be freed. */
        if (!use_napi) {
                skb_orphan(skb);
-               nf_reset(skb);
+               nf_reset_ct(skb);
        }
 
        /* If running out of space, stop queue to avoid getting packets that we
index a4b38a980c3cbd0ecdc148fd2922b9bda33e6478..b8228f50bc9412e8d6037510b0cc46df55369202 100644 (file)
@@ -366,7 +366,7 @@ static int vrf_finish_output6(struct net *net, struct sock *sk,
        struct neighbour *neigh;
        int ret;
 
-       nf_reset(skb);
+       nf_reset_ct(skb);
 
        skb->protocol = htons(ETH_P_IPV6);
        skb->dev = dev;
@@ -459,7 +459,7 @@ static struct sk_buff *vrf_ip6_out_direct(struct net_device *vrf_dev,
 
        /* reset skb device */
        if (likely(err == 1))
-               nf_reset(skb);
+               nf_reset_ct(skb);
        else
                skb = NULL;
 
@@ -560,7 +560,7 @@ static int vrf_finish_output(struct net *net, struct sock *sk, struct sk_buff *s
        bool is_v6gw = false;
        int ret = -EINVAL;
 
-       nf_reset(skb);
+       nf_reset_ct(skb);
 
        /* Be paranoid, rather than too clever. */
        if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
@@ -670,7 +670,7 @@ static struct sk_buff *vrf_ip_out_direct(struct net_device *vrf_dev,
 
        /* reset skb device */
        if (likely(err == 1))
-               nf_reset(skb);
+               nf_reset_ct(skb);
        else
                skb = NULL;
 
@@ -865,7 +865,6 @@ static int vrf_dev_init(struct net_device *dev)
 
        /* similarly, oper state is irrelevant; set to up to avoid confusion */
        dev->operstate = IF_OPER_UP;
-       netdev_lockdep_set_classes(dev);
        return 0;
 
 out_rth:
index 3d9bcc957f7da15b58aad5d9cb6acdb9803f4553..8869154fad8895194a3a101f93119890a930c542 100644 (file)
@@ -2487,9 +2487,11 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
                vni = tunnel_id_to_key32(info->key.tun_id);
                ifindex = 0;
                dst_cache = &info->dst_cache;
-               if (info->options_len &&
-                   info->key.tun_flags & TUNNEL_VXLAN_OPT)
+               if (info->key.tun_flags & TUNNEL_VXLAN_OPT) {
+                       if (info->options_len < sizeof(*md))
+                               goto drop;
                        md = ip_tunnel_info_opts(info);
+               }
                ttl = info->key.ttl;
                tos = info->key.tos;
                label = info->key.label;
@@ -3566,10 +3568,13 @@ static int __vxlan_dev_create(struct net *net, struct net_device *dev,
 {
        struct vxlan_net *vn = net_generic(net, vxlan_net_id);
        struct vxlan_dev *vxlan = netdev_priv(dev);
+       struct net_device *remote_dev = NULL;
        struct vxlan_fdb *f = NULL;
        bool unregister = false;
+       struct vxlan_rdst *dst;
        int err;
 
+       dst = &vxlan->default_dst;
        err = vxlan_dev_configure(net, dev, conf, false, extack);
        if (err)
                return err;
@@ -3577,14 +3582,14 @@ static int __vxlan_dev_create(struct net *net, struct net_device *dev,
        dev->ethtool_ops = &vxlan_ethtool_ops;
 
        /* create an fdb entry for a valid default destination */
-       if (!vxlan_addr_any(&vxlan->default_dst.remote_ip)) {
+       if (!vxlan_addr_any(&dst->remote_ip)) {
                err = vxlan_fdb_create(vxlan, all_zeros_mac,
-                                      &vxlan->default_dst.remote_ip,
+                                      &dst->remote_ip,
                                       NUD_REACHABLE | NUD_PERMANENT,
                                       vxlan->cfg.dst_port,
-                                      vxlan->default_dst.remote_vni,
-                                      vxlan->default_dst.remote_vni,
-                                      vxlan->default_dst.remote_ifindex,
+                                      dst->remote_vni,
+                                      dst->remote_vni,
+                                      dst->remote_ifindex,
                                       NTF_SELF, &f);
                if (err)
                        return err;
@@ -3595,26 +3600,41 @@ static int __vxlan_dev_create(struct net *net, struct net_device *dev,
                goto errout;
        unregister = true;
 
+       if (dst->remote_ifindex) {
+               remote_dev = __dev_get_by_index(net, dst->remote_ifindex);
+               if (!remote_dev)
+                       goto errout;
+
+               err = netdev_upper_dev_link(remote_dev, dev, extack);
+               if (err)
+                       goto errout;
+       }
+
        err = rtnl_configure_link(dev, NULL);
        if (err)
-               goto errout;
+               goto unlink;
 
        if (f) {
-               vxlan_fdb_insert(vxlan, all_zeros_mac,
-                                vxlan->default_dst.remote_vni, f);
+               vxlan_fdb_insert(vxlan, all_zeros_mac, dst->remote_vni, f);
 
                /* notify default fdb entry */
                err = vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f),
                                       RTM_NEWNEIGH, true, extack);
                if (err) {
                        vxlan_fdb_destroy(vxlan, f, false, false);
+                       if (remote_dev)
+                               netdev_upper_dev_unlink(remote_dev, dev);
                        goto unregister;
                }
        }
 
        list_add(&vxlan->next, &vn->vxlan_list);
+       if (remote_dev)
+               dst->remote_dev = remote_dev;
        return 0;
-
+unlink:
+       if (remote_dev)
+               netdev_upper_dev_unlink(remote_dev, dev);
 errout:
        /* unregister_netdevice() destroys the default FDB entry with deletion
         * notification. But the addition notification was not sent yet, so
@@ -3932,11 +3952,12 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
                            struct netlink_ext_ack *extack)
 {
        struct vxlan_dev *vxlan = netdev_priv(dev);
-       struct vxlan_rdst *dst = &vxlan->default_dst;
        struct net_device *lowerdev;
        struct vxlan_config conf;
+       struct vxlan_rdst *dst;
        int err;
 
+       dst = &vxlan->default_dst;
        err = vxlan_nl2conf(tb, data, dev, &conf, true, extack);
        if (err)
                return err;
@@ -3946,6 +3967,14 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
        if (err)
                return err;
 
+       if (dst->remote_dev == lowerdev)
+               lowerdev = NULL;
+
+       err = netdev_adjacent_change_prepare(dst->remote_dev, lowerdev, dev,
+                                            extack);
+       if (err)
+               return err;
+
        /* handle default dst entry */
        if (!vxlan_addr_equal(&conf.remote_ip, &dst->remote_ip)) {
                u32 hash_index = fdb_head_index(vxlan, all_zeros_mac, conf.vni);
@@ -3962,6 +3991,8 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
                                               NTF_SELF, true, extack);
                        if (err) {
                                spin_unlock_bh(&vxlan->hash_lock[hash_index]);
+                               netdev_adjacent_change_abort(dst->remote_dev,
+                                                            lowerdev, dev);
                                return err;
                        }
                }
@@ -3979,6 +4010,11 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
        if (conf.age_interval != vxlan->cfg.age_interval)
                mod_timer(&vxlan->age_timer, jiffies);
 
+       netdev_adjacent_change_commit(dst->remote_dev, lowerdev, dev);
+       if (lowerdev && lowerdev != dst->remote_dev) {
+               dst->remote_dev = lowerdev;
+               netdev_update_lockdep_key(lowerdev);
+       }
        vxlan_config_apply(dev, &conf, lowerdev, vxlan->net, true);
        return 0;
 }
@@ -3991,6 +4027,8 @@ static void vxlan_dellink(struct net_device *dev, struct list_head *head)
 
        list_del(&vxlan->next);
        unregister_netdevice_queue(dev, head);
+       if (vxlan->default_dst.remote_dev)
+               netdev_upper_dev_unlink(vxlan->default_dst.remote_dev, dev);
 }
 
 static size_t vxlan_get_size(const struct net_device *dev)
index 8efb493ceec2fc96cda43cb56cde5e6d19e91015..5c79f052cad20925c63e6c72adda3a97af3ee0fd 100644 (file)
@@ -127,12 +127,12 @@ int i2400m_op_rfkill_sw_toggle(struct wimax_dev *wimax_dev,
                        "%d\n", result);
        result = 0;
 error_cmd:
-       kfree(cmd);
        kfree_skb(ack_skb);
 error_msg_to_dev:
 error_alloc:
        d_fnend(4, dev, "(wimax_dev %p state %d) = %d\n",
                wimax_dev, state, result);
+       kfree(cmd);
        return result;
 }
 
index dc45d16e8d21445f978e6831579a12a78357d57b..383d4fa555a889f16b642171b23301c1f3651132 100644 (file)
@@ -2118,12 +2118,15 @@ static int ath10k_init_uart(struct ath10k *ar)
                return ret;
        }
 
-       if (!uart_print && ar->hw_params.uart_pin_workaround) {
-               ret = ath10k_bmi_write32(ar, hi_dbg_uart_txpin,
-                                        ar->hw_params.uart_pin);
-               if (ret) {
-                       ath10k_warn(ar, "failed to set UART TX pin: %d", ret);
-                       return ret;
+       if (!uart_print) {
+               if (ar->hw_params.uart_pin_workaround) {
+                       ret = ath10k_bmi_write32(ar, hi_dbg_uart_txpin,
+                                                ar->hw_params.uart_pin);
+                       if (ret) {
+                               ath10k_warn(ar, "failed to set UART TX pin: %d",
+                                           ret);
+                               return ret;
+                       }
                }
 
                return 0;
index 7573af2d88ce7cc8e35a45bcc050bac55a20d674..c2db758b9d54df6c9b6cf701f03cd8c879a08a51 100644 (file)
@@ -162,12 +162,13 @@ int iwl_acpi_get_mcc(struct device *dev, char *mcc)
 
        wifi_pkg = iwl_acpi_get_wifi_pkg(dev, data, ACPI_WRDD_WIFI_DATA_SIZE,
                                         &tbl_rev);
-       if (IS_ERR(wifi_pkg) || tbl_rev != 0) {
+       if (IS_ERR(wifi_pkg)) {
                ret = PTR_ERR(wifi_pkg);
                goto out_free;
        }
 
-       if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) {
+       if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER ||
+           tbl_rev != 0) {
                ret = -EINVAL;
                goto out_free;
        }
@@ -224,12 +225,13 @@ int iwl_acpi_get_eckv(struct device *dev, u32 *extl_clk)
 
        wifi_pkg = iwl_acpi_get_wifi_pkg(dev, data, ACPI_ECKV_WIFI_DATA_SIZE,
                                         &tbl_rev);
-       if (IS_ERR(wifi_pkg) || tbl_rev != 0) {
+       if (IS_ERR(wifi_pkg)) {
                ret = PTR_ERR(wifi_pkg);
                goto out_free;
        }
 
-       if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) {
+       if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER ||
+           tbl_rev != 0) {
                ret = -EINVAL;
                goto out_free;
        }
index 39c64850cb6f04ca25ff305a5cb4b9fe94d9584b..c0750ced5ac2952f6dec3965abefca731530c4db 100644 (file)
@@ -520,7 +520,7 @@ struct iwl_scan_dwell {
 } __packed;
 
 /**
- * struct iwl_scan_config
+ * struct iwl_scan_config_v1
  * @flags:                     enum scan_config_flags
  * @tx_chains:                 valid_tx antenna - ANT_* definitions
  * @rx_chains:                 valid_rx antenna - ANT_* definitions
@@ -552,7 +552,7 @@ struct iwl_scan_config_v1 {
 #define SCAN_LB_LMAC_IDX 0
 #define SCAN_HB_LMAC_IDX 1
 
-struct iwl_scan_config {
+struct iwl_scan_config_v2 {
        __le32 flags;
        __le32 tx_chains;
        __le32 rx_chains;
@@ -564,6 +564,24 @@ struct iwl_scan_config {
        u8 bcast_sta_id;
        u8 channel_flags;
        u8 channel_array[];
+} __packed; /* SCAN_CONFIG_DB_CMD_API_S_2 */
+
+/**
+ * struct iwl_scan_config
+ * @enable_cam_mode: whether to enable CAM mode.
+ * @enable_promiscouos_mode: whether to enable promiscuous mode
+ * @bcast_sta_id: the index of the station in the fw
+ * @reserved: reserved
+ * @tx_chains: valid_tx antenna - ANT_* definitions
+ * @rx_chains: valid_rx antenna - ANT_* definitions
+ */
+struct iwl_scan_config {
+       u8 enable_cam_mode;
+       u8 enable_promiscouos_mode;
+       u8 bcast_sta_id;
+       u8 reserved;
+       __le32 tx_chains;
+       __le32 rx_chains;
 } __packed; /* SCAN_CONFIG_DB_CMD_API_S_3 */
 
 /**
index 5c8602de9168992836dbdf9aa6f86586b88118d1..87421807e040b2756464293aa101082aa510cffb 100644 (file)
@@ -646,6 +646,7 @@ static struct scatterlist *alloc_sgtable(int size)
                                if (new_page)
                                        __free_page(new_page);
                        }
+                       kfree(table);
                        return NULL;
                }
                alloc_size = min_t(int, size, PAGE_SIZE);
index 423cc0cf8e78139146664fd19d30d2a37f8e635b..0d5bc4ce5c072d0f4402f2dbd376f970f5409df3 100644 (file)
@@ -288,6 +288,8 @@ typedef unsigned int __bitwise iwl_ucode_tlv_api_t;
  *     STA_CONTEXT_DOT11AX_API_S
  * @IWL_UCODE_TLV_CAPA_SAR_TABLE_VER: This ucode supports different sar
  *     version tables.
+ * @IWL_UCODE_TLV_API_REDUCED_SCAN_CONFIG: This ucode supports v3 of
+ *  SCAN_CONFIG_DB_CMD_API_S.
  *
  * @NUM_IWL_UCODE_TLV_API: number of bits used
  */
@@ -321,6 +323,7 @@ enum iwl_ucode_tlv_api {
        IWL_UCODE_TLV_API_WOWLAN_TCP_SYN_WAKE   = (__force iwl_ucode_tlv_api_t)53,
        IWL_UCODE_TLV_API_FTM_RTT_ACCURACY      = (__force iwl_ucode_tlv_api_t)54,
        IWL_UCODE_TLV_API_SAR_TABLE_VER         = (__force iwl_ucode_tlv_api_t)55,
+       IWL_UCODE_TLV_API_REDUCED_SCAN_CONFIG   = (__force iwl_ucode_tlv_api_t)56,
        IWL_UCODE_TLV_API_ADWELL_HB_DEF_N_AP    = (__force iwl_ucode_tlv_api_t)57,
        IWL_UCODE_TLV_API_SCAN_EXT_CHAN_VER     = (__force iwl_ucode_tlv_api_t)58,
 
index cb4c5514a55605c2d6215396244ab8b3f5daa514..695bbaa86273dd025f7d9322221ef2c439a97342 100644 (file)
  *         Indicates MAC is entering a power-saving sleep power-down.
  *         Not a good time to access device-internal resources.
  */
+#define CSR_GP_CNTRL_REG_FLAG_INIT_DONE                     (0x00000004)
 #define CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP         (0x00000010)
 #define CSR_GP_CNTRL_REG_FLAG_XTAL_ON               (0x00000400)
 
index f8e4f0f5de0c7251040627e39f8034dbb501bb4c..f09e368c7040c5868f0c2a076df262c4be3b0ded 100644 (file)
@@ -112,38 +112,38 @@ int iwl_dump_fh(struct iwl_trans *trans, char **buf);
  */
 static inline u32 iwl_umac_prph(struct iwl_trans *trans, u32 ofs)
 {
-       return ofs + trans->cfg->trans.umac_prph_offset;
+       return ofs + trans->trans_cfg->umac_prph_offset;
 }
 
 static inline u32 iwl_read_umac_prph_no_grab(struct iwl_trans *trans, u32 ofs)
 {
        return iwl_read_prph_no_grab(trans, ofs +
-                                    trans->cfg->trans.umac_prph_offset);
+                                    trans->trans_cfg->umac_prph_offset);
 }
 
 static inline u32 iwl_read_umac_prph(struct iwl_trans *trans, u32 ofs)
 {
-       return iwl_read_prph(trans, ofs + trans->cfg->trans.umac_prph_offset);
+       return iwl_read_prph(trans, ofs + trans->trans_cfg->umac_prph_offset);
 }
 
 static inline void iwl_write_umac_prph_no_grab(struct iwl_trans *trans, u32 ofs,
                                               u32 val)
 {
-       iwl_write_prph_no_grab(trans,  ofs + trans->cfg->trans.umac_prph_offset,
+       iwl_write_prph_no_grab(trans,  ofs + trans->trans_cfg->umac_prph_offset,
                               val);
 }
 
 static inline void iwl_write_umac_prph(struct iwl_trans *trans, u32 ofs,
                                       u32 val)
 {
-       iwl_write_prph(trans,  ofs + trans->cfg->trans.umac_prph_offset, val);
+       iwl_write_prph(trans,  ofs + trans->trans_cfg->umac_prph_offset, val);
 }
 
 static inline int iwl_poll_umac_prph_bit(struct iwl_trans *trans, u32 addr,
                                         u32 bits, u32 mask, int timeout)
 {
        return iwl_poll_prph_bit(trans, addr +
-                                trans->cfg->trans.umac_prph_offset,
+                                trans->trans_cfg->umac_prph_offset,
                                 bits, mask, timeout);
 }
 
index f47e0f97acf89d3c5d9bc483b7e2b75eefc4f8ec..23c25a7665f27087a58bd556de4c955e81e7cabd 100644 (file)
@@ -449,6 +449,11 @@ enum {
 #define PERSISTENCE_BIT                        BIT(12)
 #define PREG_WFPM_ACCESS               BIT(12)
 
+#define HPM_HIPM_GEN_CFG                       0xA03458
+#define HPM_HIPM_GEN_CFG_CR_PG_EN              BIT(0)
+#define HPM_HIPM_GEN_CFG_CR_SLP_EN             BIT(1)
+#define HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE       BIT(10)
+
 #define UREG_DOORBELL_TO_ISR6          0xA05C04
 #define UREG_DOORBELL_TO_ISR6_NMI_BIT  BIT(0)
 #define UREG_DOORBELL_TO_ISR6_SUSPEND  BIT(18)
index 32a5e4e5461ff0a37e5282dff8285db42254ea64..d9eb2b2864383ffc59face511b5f888bee2ac0ff 100644 (file)
@@ -420,6 +420,9 @@ static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
        };
        int ret;
 
+       if (mvm->trans->cfg->tx_with_siso_diversity)
+               init_cfg.init_flags |= cpu_to_le32(BIT(IWL_INIT_PHY));
+
        lockdep_assert_held(&mvm->mutex);
 
        mvm->rfkill_safe_init_done = false;
@@ -694,12 +697,13 @@ static int iwl_mvm_sar_get_wrds_table(struct iwl_mvm *mvm)
 
        wifi_pkg = iwl_acpi_get_wifi_pkg(mvm->dev, data,
                                         ACPI_WRDS_WIFI_DATA_SIZE, &tbl_rev);
-       if (IS_ERR(wifi_pkg) || tbl_rev != 0) {
+       if (IS_ERR(wifi_pkg)) {
                ret = PTR_ERR(wifi_pkg);
                goto out_free;
        }
 
-       if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) {
+       if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER ||
+           tbl_rev != 0) {
                ret = -EINVAL;
                goto out_free;
        }
@@ -731,13 +735,14 @@ static int iwl_mvm_sar_get_ewrd_table(struct iwl_mvm *mvm)
 
        wifi_pkg = iwl_acpi_get_wifi_pkg(mvm->dev, data,
                                         ACPI_EWRD_WIFI_DATA_SIZE, &tbl_rev);
-       if (IS_ERR(wifi_pkg) || tbl_rev != 0) {
+       if (IS_ERR(wifi_pkg)) {
                ret = PTR_ERR(wifi_pkg);
                goto out_free;
        }
 
        if ((wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) ||
-           (wifi_pkg->package.elements[2].type != ACPI_TYPE_INTEGER)) {
+           (wifi_pkg->package.elements[2].type != ACPI_TYPE_INTEGER) ||
+           tbl_rev != 0) {
                ret = -EINVAL;
                goto out_free;
        }
@@ -791,11 +796,16 @@ static int iwl_mvm_sar_get_wgds_table(struct iwl_mvm *mvm)
 
        wifi_pkg = iwl_acpi_get_wifi_pkg(mvm->dev, data,
                                         ACPI_WGDS_WIFI_DATA_SIZE, &tbl_rev);
-       if (IS_ERR(wifi_pkg) || tbl_rev > 1) {
+       if (IS_ERR(wifi_pkg)) {
                ret = PTR_ERR(wifi_pkg);
                goto out_free;
        }
 
+       if (tbl_rev != 0) {
+               ret = -EINVAL;
+               goto out_free;
+       }
+
        mvm->geo_rev = tbl_rev;
        for (i = 0; i < ACPI_NUM_GEO_PROFILES; i++) {
                for (j = 0; j < ACPI_GEO_TABLE_SIZE; j++) {
@@ -889,15 +899,17 @@ static bool iwl_mvm_sar_geo_support(struct iwl_mvm *mvm)
         * firmware versions.  Unfortunately, we don't have a TLV API
         * flag to rely on, so rely on the major version which is in
         * the first byte of ucode_ver.  This was implemented
-        * initially on version 38 and then backported to29 and 17.
-        * The intention was to have it in 36 as well, but not all
-        * 8000 family got this feature enabled.  The 8000 family is
-        * the only one using version 36, so skip this version
-        * entirely.
+        * initially on version 38 and then backported to 17.  It was
+        * also backported to 29, but only for 7265D devices.  The
+        * intention was to have it in 36 as well, but not all 8000
+        * family got this feature enabled.  The 8000 family is the
+        * only one using version 36, so skip this version entirely.
         */
        return IWL_UCODE_SERIAL(mvm->fw->ucode_ver) >= 38 ||
-              IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 29 ||
-              IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 17;
+              IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 17 ||
+              (IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 29 &&
+               ((mvm->trans->hw_rev & CSR_HW_REV_TYPE_MSK) ==
+                CSR_HW_REV_TYPE_7265D));
 }
 
 int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
@@ -1020,11 +1032,16 @@ static int iwl_mvm_get_ppag_table(struct iwl_mvm *mvm)
        wifi_pkg = iwl_acpi_get_wifi_pkg(mvm->dev, data,
                                         ACPI_PPAG_WIFI_DATA_SIZE, &tbl_rev);
 
-       if (IS_ERR(wifi_pkg) || tbl_rev != 0) {
+       if (IS_ERR(wifi_pkg)) {
                ret = PTR_ERR(wifi_pkg);
                goto out_free;
        }
 
+       if (tbl_rev != 0) {
+               ret = -EINVAL;
+               goto out_free;
+       }
+
        enabled = &wifi_pkg->package.elements[1];
        if (enabled->type != ACPI_TYPE_INTEGER ||
            (enabled->integer.value != 0 && enabled->integer.value != 1)) {
index cd1b10042fbfc8cb55cf025d1285dfd85f67372c..d31f96c3f9251a2256bab9e181796e91289147cd 100644 (file)
@@ -4881,11 +4881,11 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
        if (!iwl_mvm_has_new_rx_api(mvm))
                return;
 
-       notif->cookie = mvm->queue_sync_cookie;
-
-       if (notif->sync)
+       if (notif->sync) {
+               notif->cookie = mvm->queue_sync_cookie;
                atomic_set(&mvm->queue_sync_counter,
                           mvm->trans->num_rx_queues);
+       }
 
        ret = iwl_mvm_notify_rx_queue(mvm, qmask, (u8 *)notif,
                                      size, !notif->sync);
@@ -4905,7 +4905,8 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
 
 out:
        atomic_set(&mvm->queue_sync_counter, 0);
-       mvm->queue_sync_cookie++;
+       if (notif->sync)
+               mvm->queue_sync_cookie++;
 }
 
 static void iwl_mvm_sync_rx_queues(struct ieee80211_hw *hw)
index 843d00bf2bd55504d057075871968291f9897a3a..5ca50f39a023db3f5af56519ea9f135b74a204a5 100644 (file)
@@ -1405,6 +1405,12 @@ static inline bool iwl_mvm_is_scan_ext_chan_supported(struct iwl_mvm *mvm)
                          IWL_UCODE_TLV_API_SCAN_EXT_CHAN_VER);
 }
 
+static inline bool iwl_mvm_is_reduced_config_scan_supported(struct iwl_mvm *mvm)
+{
+       return fw_has_api(&mvm->fw->ucode_capa,
+                         IWL_UCODE_TLV_API_REDUCED_SCAN_CONFIG);
+}
+
 static inline bool iwl_mvm_has_new_rx_stats_api(struct iwl_mvm *mvm)
 {
        return fw_has_api(&mvm->fw->ucode_capa,
index f6b3045badbdec548b695c39e8744f6a1f3e9ced..fcafa22ec6ce222878a29496f6940347fe5620f4 100644 (file)
@@ -1137,11 +1137,11 @@ static void iwl_mvm_fill_scan_config_v1(struct iwl_mvm *mvm, void *config,
        iwl_mvm_fill_channels(mvm, cfg->channel_array, max_channels);
 }
 
-static void iwl_mvm_fill_scan_config(struct iwl_mvm *mvm, void *config,
-                                    u32 flags, u8 channel_flags,
-                                    u32 max_channels)
+static void iwl_mvm_fill_scan_config_v2(struct iwl_mvm *mvm, void *config,
+                                       u32 flags, u8 channel_flags,
+                                       u32 max_channels)
 {
-       struct iwl_scan_config *cfg = config;
+       struct iwl_scan_config_v2 *cfg = config;
 
        cfg->flags = cpu_to_le32(flags);
        cfg->tx_chains = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm));
@@ -1185,7 +1185,7 @@ static void iwl_mvm_fill_scan_config(struct iwl_mvm *mvm, void *config,
        iwl_mvm_fill_channels(mvm, cfg->channel_array, max_channels);
 }
 
-int iwl_mvm_config_scan(struct iwl_mvm *mvm)
+static int iwl_mvm_legacy_config_scan(struct iwl_mvm *mvm)
 {
        void *cfg;
        int ret, cmd_size;
@@ -1217,7 +1217,7 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
        }
 
        if (iwl_mvm_cdb_scan_api(mvm))
-               cmd_size = sizeof(struct iwl_scan_config);
+               cmd_size = sizeof(struct iwl_scan_config_v2);
        else
                cmd_size = sizeof(struct iwl_scan_config_v1);
        cmd_size += num_channels;
@@ -1254,8 +1254,8 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
                        flags |= (iwl_mvm_is_scan_fragmented(hb_type)) ?
                                 SCAN_CONFIG_FLAG_SET_LMAC2_FRAGMENTED :
                                 SCAN_CONFIG_FLAG_CLEAR_LMAC2_FRAGMENTED;
-               iwl_mvm_fill_scan_config(mvm, cfg, flags, channel_flags,
-                                        num_channels);
+               iwl_mvm_fill_scan_config_v2(mvm, cfg, flags, channel_flags,
+                                           num_channels);
        } else {
                iwl_mvm_fill_scan_config_v1(mvm, cfg, flags, channel_flags,
                                            num_channels);
@@ -1277,6 +1277,30 @@ int iwl_mvm_config_scan(struct iwl_mvm *mvm)
        return ret;
 }
 
+int iwl_mvm_config_scan(struct iwl_mvm *mvm)
+{
+       struct iwl_scan_config cfg;
+       struct iwl_host_cmd cmd = {
+               .id = iwl_cmd_id(SCAN_CFG_CMD, IWL_ALWAYS_LONG_GROUP, 0),
+               .len[0] = sizeof(cfg),
+               .data[0] = &cfg,
+               .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
+       };
+
+       if (!iwl_mvm_is_reduced_config_scan_supported(mvm))
+               return iwl_mvm_legacy_config_scan(mvm);
+
+       memset(&cfg, 0, sizeof(cfg));
+
+       cfg.bcast_sta_id = mvm->aux_sta.sta_id;
+       cfg.tx_chains = cpu_to_le32(iwl_mvm_get_valid_tx_ant(mvm));
+       cfg.rx_chains = cpu_to_le32(iwl_mvm_scan_rx_ant(mvm));
+
+       IWL_DEBUG_SCAN(mvm, "Sending UMAC scan config\n");
+
+       return iwl_mvm_send_cmd(mvm, &cmd);
+}
+
 static int iwl_mvm_scan_uid_by_status(struct iwl_mvm *mvm, int status)
 {
        int i;
index 0bedba4c61f2ae898c7ddbb612d041e9c9fdd7dc..b3768d5d852ae367394630a9e4d72d03b28ab2a9 100644 (file)
@@ -1482,6 +1482,13 @@ static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
                                            mvm_sta->sta_id, i);
                        txq_id = iwl_mvm_tvqm_enable_txq(mvm, mvm_sta->sta_id,
                                                         i, wdg);
+                       /*
+                        * on failures, just set it to IWL_MVM_INVALID_QUEUE
+                        * to try again later, we have no other good way of
+                        * failing here
+                        */
+                       if (txq_id < 0)
+                               txq_id = IWL_MVM_INVALID_QUEUE;
                        tid_data->txq_id = txq_id;
 
                        /*
@@ -1950,30 +1957,73 @@ void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta)
        sta->sta_id = IWL_MVM_INVALID_STA;
 }
 
-static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm *mvm, u16 *queue,
+static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm *mvm, u16 queue,
                                          u8 sta_id, u8 fifo)
 {
        unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ?
                mvm->trans->trans_cfg->base_params->wd_timeout :
                IWL_WATCHDOG_DISABLED;
+       struct iwl_trans_txq_scd_cfg cfg = {
+               .fifo = fifo,
+               .sta_id = sta_id,
+               .tid = IWL_MAX_TID_COUNT,
+               .aggregate = false,
+               .frame_limit = IWL_FRAME_LIMIT,
+       };
+
+       WARN_ON(iwl_mvm_has_new_tx_api(mvm));
+
+       iwl_mvm_enable_txq(mvm, NULL, queue, 0, &cfg, wdg_timeout);
+}
+
+static int iwl_mvm_enable_aux_snif_queue_tvqm(struct iwl_mvm *mvm, u8 sta_id)
+{
+       unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ?
+               mvm->trans->trans_cfg->base_params->wd_timeout :
+               IWL_WATCHDOG_DISABLED;
+
+       WARN_ON(!iwl_mvm_has_new_tx_api(mvm));
+
+       return iwl_mvm_tvqm_enable_txq(mvm, sta_id, IWL_MAX_TID_COUNT,
+                                      wdg_timeout);
+}
 
+static int iwl_mvm_add_int_sta_with_queue(struct iwl_mvm *mvm, int macidx,
+                                         int maccolor,
+                                         struct iwl_mvm_int_sta *sta,
+                                         u16 *queue, int fifo)
+{
+       int ret;
+
+       /* Map queue to fifo - needs to happen before adding station */
+       if (!iwl_mvm_has_new_tx_api(mvm))
+               iwl_mvm_enable_aux_snif_queue(mvm, *queue, sta->sta_id, fifo);
+
+       ret = iwl_mvm_add_int_sta_common(mvm, sta, NULL, macidx, maccolor);
+       if (ret) {
+               if (!iwl_mvm_has_new_tx_api(mvm))
+                       iwl_mvm_disable_txq(mvm, NULL, *queue,
+                                           IWL_MAX_TID_COUNT, 0);
+               return ret;
+       }
+
+       /*
+        * For 22000 firmware and on we cannot add queue to a station unknown
+        * to firmware so enable queue here - after the station was added
+        */
        if (iwl_mvm_has_new_tx_api(mvm)) {
-               int tvqm_queue =
-                       iwl_mvm_tvqm_enable_txq(mvm, sta_id,
-                                               IWL_MAX_TID_COUNT,
-                                               wdg_timeout);
-               *queue = tvqm_queue;
-       } else {
-               struct iwl_trans_txq_scd_cfg cfg = {
-                       .fifo = fifo,
-                       .sta_id = sta_id,
-                       .tid = IWL_MAX_TID_COUNT,
-                       .aggregate = false,
-                       .frame_limit = IWL_FRAME_LIMIT,
-               };
+               int txq;
 
-               iwl_mvm_enable_txq(mvm, NULL, *queue, 0, &cfg, wdg_timeout);
+               txq = iwl_mvm_enable_aux_snif_queue_tvqm(mvm, sta->sta_id);
+               if (txq < 0) {
+                       iwl_mvm_rm_sta_common(mvm, sta->sta_id);
+                       return txq;
+               }
+
+               *queue = txq;
        }
+
+       return 0;
 }
 
 int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
@@ -1989,59 +2039,26 @@ int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
        if (ret)
                return ret;
 
-       /* Map Aux queue to fifo - needs to happen before adding Aux station */
-       if (!iwl_mvm_has_new_tx_api(mvm))
-               iwl_mvm_enable_aux_snif_queue(mvm, &mvm->aux_queue,
-                                             mvm->aux_sta.sta_id,
-                                             IWL_MVM_TX_FIFO_MCAST);
-
-       ret = iwl_mvm_add_int_sta_common(mvm, &mvm->aux_sta, NULL,
-                                        MAC_INDEX_AUX, 0);
+       ret = iwl_mvm_add_int_sta_with_queue(mvm, MAC_INDEX_AUX, 0,
+                                            &mvm->aux_sta, &mvm->aux_queue,
+                                            IWL_MVM_TX_FIFO_MCAST);
        if (ret) {
                iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
                return ret;
        }
 
-       /*
-        * For 22000 firmware and on we cannot add queue to a station unknown
-        * to firmware so enable queue here - after the station was added
-        */
-       if (iwl_mvm_has_new_tx_api(mvm))
-               iwl_mvm_enable_aux_snif_queue(mvm, &mvm->aux_queue,
-                                             mvm->aux_sta.sta_id,
-                                             IWL_MVM_TX_FIFO_MCAST);
-
        return 0;
 }
 
 int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 {
        struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
-       int ret;
 
        lockdep_assert_held(&mvm->mutex);
 
-       /* Map snif queue to fifo - must happen before adding snif station */
-       if (!iwl_mvm_has_new_tx_api(mvm))
-               iwl_mvm_enable_aux_snif_queue(mvm, &mvm->snif_queue,
-                                             mvm->snif_sta.sta_id,
+       return iwl_mvm_add_int_sta_with_queue(mvm, mvmvif->id, mvmvif->color,
+                                             &mvm->snif_sta, &mvm->snif_queue,
                                              IWL_MVM_TX_FIFO_BE);
-
-       ret = iwl_mvm_add_int_sta_common(mvm, &mvm->snif_sta, vif->addr,
-                                        mvmvif->id, 0);
-       if (ret)
-               return ret;
-
-       /*
-        * For 22000 firmware and on we cannot add queue to a station unknown
-        * to firmware so enable queue here - after the station was added
-        */
-       if (iwl_mvm_has_new_tx_api(mvm))
-               iwl_mvm_enable_aux_snif_queue(mvm, &mvm->snif_queue,
-                                             mvm->snif_sta.sta_id,
-                                             IWL_MVM_TX_FIFO_BE);
-
-       return 0;
 }
 
 int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
@@ -2133,6 +2150,10 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
                queue = iwl_mvm_tvqm_enable_txq(mvm, bsta->sta_id,
                                                IWL_MAX_TID_COUNT,
                                                wdg_timeout);
+               if (queue < 0) {
+                       iwl_mvm_rm_sta_common(mvm, bsta->sta_id);
+                       return queue;
+               }
 
                if (vif->type == NL80211_IFTYPE_AP ||
                    vif->type == NL80211_IFTYPE_ADHOC)
@@ -2307,10 +2328,8 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
        }
        ret = iwl_mvm_add_int_sta_common(mvm, msta, maddr,
                                         mvmvif->id, mvmvif->color);
-       if (ret) {
-               iwl_mvm_dealloc_int_sta(mvm, msta);
-               return ret;
-       }
+       if (ret)
+               goto err;
 
        /*
         * Enable cab queue after the ADD_STA command is sent.
@@ -2323,6 +2342,10 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
                int queue = iwl_mvm_tvqm_enable_txq(mvm, msta->sta_id,
                                                    0,
                                                    timeout);
+               if (queue < 0) {
+                       ret = queue;
+                       goto err;
+               }
                mvmvif->cab_queue = queue;
        } else if (!fw_has_api(&mvm->fw->ucode_capa,
                               IWL_UCODE_TLV_API_STA_TYPE))
@@ -2330,6 +2353,9 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
                                   timeout);
 
        return 0;
+err:
+       iwl_mvm_dealloc_int_sta(mvm, msta);
+       return ret;
 }
 
 static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id,
index 75fa8a6aafeec951e71408fa166b47014fdddb49..74980382e64c89e1def4e1106a5439488a858ea9 100644 (file)
@@ -107,13 +107,9 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
 
        /* allocate ucode sections in dram and set addresses */
        ret = iwl_pcie_init_fw_sec(trans, fw, &prph_scratch->dram);
-       if (ret) {
-               dma_free_coherent(trans->dev,
-                                 sizeof(*prph_scratch),
-                                 prph_scratch,
-                                 trans_pcie->prph_scratch_dma_addr);
-               return ret;
-       }
+       if (ret)
+               goto err_free_prph_scratch;
+
 
        /* Allocate prph information
         * currently we don't assign to the prph info anything, but it would get
@@ -121,16 +117,20 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
        prph_info = dma_alloc_coherent(trans->dev, sizeof(*prph_info),
                                       &trans_pcie->prph_info_dma_addr,
                                       GFP_KERNEL);
-       if (!prph_info)
-               return -ENOMEM;
+       if (!prph_info) {
+               ret = -ENOMEM;
+               goto err_free_prph_scratch;
+       }
 
        /* Allocate context info */
        ctxt_info_gen3 = dma_alloc_coherent(trans->dev,
                                            sizeof(*ctxt_info_gen3),
                                            &trans_pcie->ctxt_info_dma_addr,
                                            GFP_KERNEL);
-       if (!ctxt_info_gen3)
-               return -ENOMEM;
+       if (!ctxt_info_gen3) {
+               ret = -ENOMEM;
+               goto err_free_prph_info;
+       }
 
        ctxt_info_gen3->prph_info_base_addr =
                cpu_to_le64(trans_pcie->prph_info_dma_addr);
@@ -186,6 +186,20 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
                iwl_set_bit(trans, CSR_GP_CNTRL, CSR_AUTO_FUNC_INIT);
 
        return 0;
+
+err_free_prph_info:
+       dma_free_coherent(trans->dev,
+                         sizeof(*prph_info),
+                       prph_info,
+                       trans_pcie->prph_info_dma_addr);
+
+err_free_prph_scratch:
+       dma_free_coherent(trans->dev,
+                         sizeof(*prph_scratch),
+                       prph_scratch,
+                       trans_pcie->prph_scratch_dma_addr);
+       return ret;
+
 }
 
 void iwl_pcie_ctxt_info_gen3_free(struct iwl_trans *trans)
index e29c47744ef59bca551103278b409c4ae09ac2a2..040cec17d3ad68c4e2d38ee1af19f686c1bbc90e 100644 (file)
@@ -513,31 +513,33 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
        {IWL_PCI_DEVICE(0x24FD, 0x9074, iwl8265_2ac_cfg)},
 
 /* 9000 Series */
-       {IWL_PCI_DEVICE(0x02F0, 0x0030, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
-       {IWL_PCI_DEVICE(0x02F0, 0x0034, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
-       {IWL_PCI_DEVICE(0x02F0, 0x0038, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
-       {IWL_PCI_DEVICE(0x02F0, 0x003C, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
-       {IWL_PCI_DEVICE(0x02F0, 0x0060, iwl9461_2ac_cfg_quz_a0_jf_b0_soc)},
-       {IWL_PCI_DEVICE(0x02F0, 0x0064, iwl9461_2ac_cfg_quz_a0_jf_b0_soc)},
-       {IWL_PCI_DEVICE(0x02F0, 0x00A0, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
-       {IWL_PCI_DEVICE(0x02F0, 0x00A4, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
-       {IWL_PCI_DEVICE(0x02F0, 0x0230, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
-       {IWL_PCI_DEVICE(0x02F0, 0x0234, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
-       {IWL_PCI_DEVICE(0x02F0, 0x0238, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
-       {IWL_PCI_DEVICE(0x02F0, 0x023C, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
-       {IWL_PCI_DEVICE(0x02F0, 0x0260, iwl9461_2ac_cfg_quz_a0_jf_b0_soc)},
-       {IWL_PCI_DEVICE(0x02F0, 0x0264, iwl9461_2ac_cfg_quz_a0_jf_b0_soc)},
-       {IWL_PCI_DEVICE(0x02F0, 0x02A0, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
-       {IWL_PCI_DEVICE(0x02F0, 0x02A4, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
-       {IWL_PCI_DEVICE(0x02F0, 0x1551, iwl9560_killer_s_2ac_cfg_quz_a0_jf_b0_soc)},
-       {IWL_PCI_DEVICE(0x02F0, 0x1552, iwl9560_killer_i_2ac_cfg_quz_a0_jf_b0_soc)},
-       {IWL_PCI_DEVICE(0x02F0, 0x2030, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
-       {IWL_PCI_DEVICE(0x02F0, 0x2034, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
-       {IWL_PCI_DEVICE(0x02F0, 0x4030, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
-       {IWL_PCI_DEVICE(0x02F0, 0x4034, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
-       {IWL_PCI_DEVICE(0x02F0, 0x40A4, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
-       {IWL_PCI_DEVICE(0x02F0, 0x4234, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
-       {IWL_PCI_DEVICE(0x02F0, 0x42A4, iwl9462_2ac_cfg_quz_a0_jf_b0_soc)},
+       {IWL_PCI_DEVICE(0x02F0, 0x0030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x02F0, 0x0034, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x02F0, 0x0038, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x02F0, 0x003C, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x02F0, 0x0060, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x02F0, 0x0064, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x02F0, 0x00A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x02F0, 0x00A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x02F0, 0x0230, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x02F0, 0x0234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x02F0, 0x0238, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x02F0, 0x023C, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x02F0, 0x0260, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x02F0, 0x0264, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x02F0, 0x02A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x02F0, 0x02A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x02F0, 0x1030, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x02F0, 0x1551, killer1550s_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x02F0, 0x1552, killer1550i_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x02F0, 0x2030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x02F0, 0x2034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x02F0, 0x4030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x02F0, 0x4034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x02F0, 0x40A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x02F0, 0x4234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x02F0, 0x42A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+
        {IWL_PCI_DEVICE(0x06F0, 0x0030, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
        {IWL_PCI_DEVICE(0x06F0, 0x0034, iwl9560_2ac_cfg_quz_a0_jf_b0_soc)},
        {IWL_PCI_DEVICE(0x06F0, 0x0038, iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc)},
@@ -571,20 +573,20 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
        {IWL_PCI_DEVICE(0x2526, 0x0034, iwl9560_2ac_cfg)},
        {IWL_PCI_DEVICE(0x2526, 0x0038, iwl9560_2ac_160_cfg)},
        {IWL_PCI_DEVICE(0x2526, 0x003C, iwl9560_2ac_160_cfg)},
-       {IWL_PCI_DEVICE(0x2526, 0x0060, iwl9460_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x2526, 0x0064, iwl9460_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x2526, 0x00A0, iwl9460_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x2526, 0x00A4, iwl9460_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x2526, 0x0060, iwl9461_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x2526, 0x0064, iwl9461_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x2526, 0x00A0, iwl9462_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x2526, 0x00A4, iwl9462_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0x2526, 0x0210, iwl9260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x2526, 0x0214, iwl9260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x2526, 0x0230, iwl9560_2ac_cfg)},
        {IWL_PCI_DEVICE(0x2526, 0x0234, iwl9560_2ac_cfg)},
        {IWL_PCI_DEVICE(0x2526, 0x0238, iwl9560_2ac_cfg)},
        {IWL_PCI_DEVICE(0x2526, 0x023C, iwl9560_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x2526, 0x0260, iwl9460_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x2526, 0x0260, iwl9461_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0x2526, 0x0264, iwl9461_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x2526, 0x02A0, iwl9460_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x2526, 0x02A4, iwl9460_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x2526, 0x02A0, iwl9462_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x2526, 0x02A4, iwl9462_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0x2526, 0x1010, iwl9260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x2526, 0x1030, iwl9560_2ac_cfg)},
        {IWL_PCI_DEVICE(0x2526, 0x1210, iwl9260_2ac_cfg)},
@@ -601,7 +603,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
        {IWL_PCI_DEVICE(0x2526, 0x401C, iwl9260_2ac_160_cfg)},
        {IWL_PCI_DEVICE(0x2526, 0x4030, iwl9560_2ac_160_cfg)},
        {IWL_PCI_DEVICE(0x2526, 0x4034, iwl9560_2ac_160_cfg_soc)},
-       {IWL_PCI_DEVICE(0x2526, 0x40A4, iwl9460_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x2526, 0x40A4, iwl9462_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0x2526, 0x4234, iwl9560_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0x2526, 0x42A4, iwl9462_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0x2526, 0x6010, iwl9260_2ac_160_cfg)},
@@ -616,33 +618,33 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
        {IWL_PCI_DEVICE(0x271B, 0x0210, iwl9160_2ac_cfg)},
        {IWL_PCI_DEVICE(0x271B, 0x0214, iwl9260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x271C, 0x0214, iwl9260_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x2720, 0x0034, iwl9560_2ac_160_cfg)},
-       {IWL_PCI_DEVICE(0x2720, 0x0038, iwl9560_2ac_160_cfg)},
-       {IWL_PCI_DEVICE(0x2720, 0x003C, iwl9560_2ac_160_cfg)},
-       {IWL_PCI_DEVICE(0x2720, 0x0060, iwl9461_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x2720, 0x0064, iwl9461_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x2720, 0x00A0, iwl9462_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x2720, 0x00A4, iwl9462_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x2720, 0x0230, iwl9560_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x2720, 0x0234, iwl9560_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x2720, 0x0238, iwl9560_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x2720, 0x023C, iwl9560_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x2720, 0x0260, iwl9461_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x2720, 0x0264, iwl9461_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x2720, 0x02A0, iwl9462_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x2720, 0x02A4, iwl9462_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x2720, 0x1010, iwl9260_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x2720, 0x1030, iwl9560_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x2720, 0x1210, iwl9260_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x2720, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x2720, 0x1552, iwl9560_killer_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x2720, 0x2030, iwl9560_2ac_160_cfg_soc)},
-       {IWL_PCI_DEVICE(0x2720, 0x2034, iwl9560_2ac_160_cfg_soc)},
-       {IWL_PCI_DEVICE(0x2720, 0x4030, iwl9560_2ac_160_cfg)},
-       {IWL_PCI_DEVICE(0x2720, 0x4034, iwl9560_2ac_160_cfg_soc)},
-       {IWL_PCI_DEVICE(0x2720, 0x40A4, iwl9462_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x2720, 0x4234, iwl9560_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x2720, 0x42A4, iwl9462_2ac_cfg_soc)},
+
+       {IWL_PCI_DEVICE(0x2720, 0x0034, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x2720, 0x0038, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x2720, 0x003C, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x2720, 0x0060, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x2720, 0x0064, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x2720, 0x00A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x2720, 0x00A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x2720, 0x0230, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x2720, 0x0234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x2720, 0x0238, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x2720, 0x023C, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x2720, 0x0260, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x2720, 0x0264, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x2720, 0x02A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x2720, 0x02A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x2720, 0x1030, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x2720, 0x1551, killer1550s_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x2720, 0x1552, killer1550i_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x2720, 0x2030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x2720, 0x2034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x2720, 0x4030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x2720, 0x4034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x2720, 0x40A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x2720, 0x4234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x2720, 0x42A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+
        {IWL_PCI_DEVICE(0x30DC, 0x0030, iwl9560_2ac_160_cfg_soc)},
        {IWL_PCI_DEVICE(0x30DC, 0x0034, iwl9560_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0x30DC, 0x0038, iwl9560_2ac_160_cfg_soc)},
@@ -671,6 +673,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
        {IWL_PCI_DEVICE(0x30DC, 0x40A4, iwl9462_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0x30DC, 0x4234, iwl9560_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0x30DC, 0x42A4, iwl9462_2ac_cfg_soc)},
+
        {IWL_PCI_DEVICE(0x31DC, 0x0030, iwl9560_2ac_160_cfg_shared_clk)},
        {IWL_PCI_DEVICE(0x31DC, 0x0034, iwl9560_2ac_cfg_shared_clk)},
        {IWL_PCI_DEVICE(0x31DC, 0x0038, iwl9560_2ac_160_cfg_shared_clk)},
@@ -726,62 +729,60 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
        {IWL_PCI_DEVICE(0x34F0, 0x4234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
        {IWL_PCI_DEVICE(0x34F0, 0x42A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
 
-       {IWL_PCI_DEVICE(0x3DF0, 0x0030, iwl9560_2ac_160_cfg_soc)},
-       {IWL_PCI_DEVICE(0x3DF0, 0x0034, iwl9560_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x3DF0, 0x0038, iwl9560_2ac_160_cfg_soc)},
-       {IWL_PCI_DEVICE(0x3DF0, 0x003C, iwl9560_2ac_160_cfg_soc)},
-       {IWL_PCI_DEVICE(0x3DF0, 0x0060, iwl9461_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x3DF0, 0x0064, iwl9461_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x3DF0, 0x00A0, iwl9462_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x3DF0, 0x00A4, iwl9462_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x3DF0, 0x0230, iwl9560_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x3DF0, 0x0234, iwl9560_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x3DF0, 0x0238, iwl9560_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x3DF0, 0x023C, iwl9560_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x3DF0, 0x0260, iwl9461_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x3DF0, 0x0264, iwl9461_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x3DF0, 0x02A0, iwl9462_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x3DF0, 0x02A4, iwl9462_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x3DF0, 0x1010, iwl9260_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x3DF0, 0x1030, iwl9560_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x3DF0, 0x1210, iwl9260_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x3DF0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x3DF0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x3DF0, 0x2030, iwl9560_2ac_160_cfg_soc)},
-       {IWL_PCI_DEVICE(0x3DF0, 0x2034, iwl9560_2ac_160_cfg_soc)},
-       {IWL_PCI_DEVICE(0x3DF0, 0x4030, iwl9560_2ac_160_cfg_soc)},
-       {IWL_PCI_DEVICE(0x3DF0, 0x4034, iwl9560_2ac_160_cfg_soc)},
-       {IWL_PCI_DEVICE(0x3DF0, 0x40A4, iwl9462_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x3DF0, 0x4234, iwl9560_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x3DF0, 0x42A4, iwl9462_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x43F0, 0x0030, iwl9560_2ac_160_cfg_soc)},
-       {IWL_PCI_DEVICE(0x43F0, 0x0034, iwl9560_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x43F0, 0x0038, iwl9560_2ac_160_cfg_soc)},
-       {IWL_PCI_DEVICE(0x43F0, 0x003C, iwl9560_2ac_160_cfg_soc)},
-       {IWL_PCI_DEVICE(0x43F0, 0x0060, iwl9461_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x43F0, 0x0064, iwl9461_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x43F0, 0x00A0, iwl9462_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x43F0, 0x00A4, iwl9462_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x43F0, 0x0230, iwl9560_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x43F0, 0x0234, iwl9560_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x43F0, 0x0238, iwl9560_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x43F0, 0x023C, iwl9560_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x43F0, 0x0260, iwl9461_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x43F0, 0x0264, iwl9461_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x43F0, 0x02A0, iwl9462_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x43F0, 0x02A4, iwl9462_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x43F0, 0x1010, iwl9260_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x43F0, 0x1030, iwl9560_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x43F0, 0x1210, iwl9260_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x43F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x43F0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x43F0, 0x2030, iwl9560_2ac_160_cfg_soc)},
-       {IWL_PCI_DEVICE(0x43F0, 0x2034, iwl9560_2ac_160_cfg_soc)},
-       {IWL_PCI_DEVICE(0x43F0, 0x4030, iwl9560_2ac_160_cfg_soc)},
-       {IWL_PCI_DEVICE(0x43F0, 0x4034, iwl9560_2ac_160_cfg_soc)},
-       {IWL_PCI_DEVICE(0x43F0, 0x40A4, iwl9462_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x43F0, 0x4234, iwl9560_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0x43F0, 0x42A4, iwl9462_2ac_cfg_soc)},
+       {IWL_PCI_DEVICE(0x3DF0, 0x0030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x3DF0, 0x0034, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x3DF0, 0x0038, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x3DF0, 0x003C, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x3DF0, 0x0060, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x3DF0, 0x0064, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x3DF0, 0x00A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x3DF0, 0x00A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x3DF0, 0x0230, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x3DF0, 0x0234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x3DF0, 0x0238, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x3DF0, 0x023C, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x3DF0, 0x0260, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x3DF0, 0x0264, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x3DF0, 0x02A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x3DF0, 0x02A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x3DF0, 0x1030, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x3DF0, 0x1551, killer1550s_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x3DF0, 0x1552, killer1550i_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x3DF0, 0x2030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x3DF0, 0x2034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x3DF0, 0x4030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x3DF0, 0x4034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x3DF0, 0x40A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x3DF0, 0x4234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x3DF0, 0x42A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+
+       {IWL_PCI_DEVICE(0x43F0, 0x0030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x43F0, 0x0034, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x43F0, 0x0038, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x43F0, 0x003C, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x43F0, 0x0060, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x43F0, 0x0064, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x43F0, 0x00A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x43F0, 0x00A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x43F0, 0x0230, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x43F0, 0x0234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x43F0, 0x0238, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x43F0, 0x023C, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x43F0, 0x0260, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x43F0, 0x0264, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x43F0, 0x02A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x43F0, 0x02A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x43F0, 0x1030, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x43F0, 0x1551, killer1550s_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x43F0, 0x1552, killer1550i_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x43F0, 0x2030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x43F0, 0x2034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x43F0, 0x4030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x43F0, 0x4034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x43F0, 0x40A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x43F0, 0x4234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0x43F0, 0x42A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+
        {IWL_PCI_DEVICE(0x9DF0, 0x0000, iwl9460_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0x9DF0, 0x0010, iwl9460_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0x9DF0, 0x0030, iwl9560_2ac_160_cfg_soc)},
@@ -821,34 +822,34 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
        {IWL_PCI_DEVICE(0x9DF0, 0x40A4, iwl9462_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0x9DF0, 0x4234, iwl9560_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0x9DF0, 0x42A4, iwl9462_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0xA0F0, 0x0030, iwl9560_2ac_160_cfg_soc)},
-       {IWL_PCI_DEVICE(0xA0F0, 0x0034, iwl9560_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0xA0F0, 0x0038, iwl9560_2ac_160_cfg_soc)},
-       {IWL_PCI_DEVICE(0xA0F0, 0x003C, iwl9560_2ac_160_cfg_soc)},
-       {IWL_PCI_DEVICE(0xA0F0, 0x0060, iwl9461_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0xA0F0, 0x0064, iwl9461_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0xA0F0, 0x00A0, iwl9462_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0xA0F0, 0x00A4, iwl9462_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0xA0F0, 0x0230, iwl9560_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0xA0F0, 0x0234, iwl9560_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0xA0F0, 0x0238, iwl9560_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0xA0F0, 0x023C, iwl9560_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0xA0F0, 0x0260, iwl9461_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0xA0F0, 0x0264, iwl9461_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0xA0F0, 0x02A0, iwl9462_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0xA0F0, 0x02A4, iwl9462_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0xA0F0, 0x1010, iwl9260_2ac_cfg)},
-       {IWL_PCI_DEVICE(0xA0F0, 0x1030, iwl9560_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0xA0F0, 0x1210, iwl9260_2ac_cfg)},
-       {IWL_PCI_DEVICE(0xA0F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0xA0F0, 0x1552, iwl9560_killer_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0xA0F0, 0x2030, iwl9560_2ac_160_cfg_soc)},
-       {IWL_PCI_DEVICE(0xA0F0, 0x2034, iwl9560_2ac_160_cfg_soc)},
-       {IWL_PCI_DEVICE(0xA0F0, 0x4030, iwl9560_2ac_160_cfg_soc)},
-       {IWL_PCI_DEVICE(0xA0F0, 0x4034, iwl9560_2ac_160_cfg_soc)},
-       {IWL_PCI_DEVICE(0xA0F0, 0x40A4, iwl9462_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0xA0F0, 0x4234, iwl9560_2ac_cfg_soc)},
-       {IWL_PCI_DEVICE(0xA0F0, 0x42A4, iwl9462_2ac_cfg_soc)},
+
+       {IWL_PCI_DEVICE(0xA0F0, 0x0030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0xA0F0, 0x0034, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0xA0F0, 0x0038, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0xA0F0, 0x003C, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0xA0F0, 0x0060, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0xA0F0, 0x0064, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0xA0F0, 0x00A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0xA0F0, 0x00A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0xA0F0, 0x0230, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0xA0F0, 0x0234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0xA0F0, 0x0238, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0xA0F0, 0x023C, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0xA0F0, 0x0260, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0xA0F0, 0x0264, iwl9461_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0xA0F0, 0x02A0, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0xA0F0, 0x02A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0xA0F0, 0x1030, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0xA0F0, 0x1551, killer1550s_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0xA0F0, 0x1552, killer1550i_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0xA0F0, 0x2030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0xA0F0, 0x2034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0xA0F0, 0x4030, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0xA0F0, 0x4034, iwl9560_2ac_160_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0xA0F0, 0x40A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0xA0F0, 0x4234, iwl9560_2ac_cfg_qu_b0_jf_b0)},
+       {IWL_PCI_DEVICE(0xA0F0, 0x42A4, iwl9462_2ac_cfg_qu_b0_jf_b0)},
+
        {IWL_PCI_DEVICE(0xA370, 0x0030, iwl9560_2ac_160_cfg_soc)},
        {IWL_PCI_DEVICE(0xA370, 0x0034, iwl9560_2ac_cfg_soc)},
        {IWL_PCI_DEVICE(0xA370, 0x0038, iwl9560_2ac_160_cfg_soc)},
@@ -1067,11 +1068,7 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                }
        } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(iwl_trans->hw_rf_id) ==
                   CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) &&
-                  ((cfg != &iwl_ax200_cfg_cc &&
-                    cfg != &killer1650x_2ax_cfg &&
-                    cfg != &killer1650w_2ax_cfg &&
-                    cfg != &iwl_ax201_cfg_quz_hr) ||
-                   iwl_trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0)) {
+                  iwl_trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0) {
                u32 hw_status;
 
                hw_status = iwl_read_prph(iwl_trans, UMAG_GEN_HW_STATUS);
index df8455f14e4d82462238de74a9b3f4ea664ccfe0..ca3bb4d65b00e2ff99421cf867f2812a4f2da563 100644 (file)
 #include "internal.h"
 #include "fw/dbg.h"
 
+static int iwl_pcie_gen2_force_power_gating(struct iwl_trans *trans)
+{
+       iwl_set_bits_prph(trans, HPM_HIPM_GEN_CFG,
+                         HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
+       udelay(20);
+       iwl_set_bits_prph(trans, HPM_HIPM_GEN_CFG,
+                         HPM_HIPM_GEN_CFG_CR_PG_EN |
+                         HPM_HIPM_GEN_CFG_CR_SLP_EN);
+       udelay(20);
+       iwl_clear_bits_prph(trans, HPM_HIPM_GEN_CFG,
+                           HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
+
+       iwl_trans_sw_reset(trans);
+       iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+
+       return 0;
+}
+
 /*
  * Start up NIC's basic functionality after it has been reset
  * (e.g. after platform boot, or shutdown via iwl_pcie_apm_stop())
@@ -92,6 +110,13 @@ int iwl_pcie_gen2_apm_init(struct iwl_trans *trans)
 
        iwl_pcie_apm_config(trans);
 
+       if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000 &&
+           trans->cfg->integrated) {
+               ret = iwl_pcie_gen2_force_power_gating(trans);
+               if (ret)
+                       return ret;
+       }
+
        ret = iwl_finish_nic_init(trans, trans->trans_cfg);
        if (ret)
                return ret;
index f8a1f985a1d85e0dd70a60a2cda12c5975714fe0..6961f00ff81219a31decbbd9ec733f1532b85a59 100644 (file)
@@ -3272,11 +3272,17 @@ static struct iwl_trans_dump_data
                ptr = cmdq->write_ptr;
                for (i = 0; i < cmdq->n_window; i++) {
                        u8 idx = iwl_pcie_get_cmd_index(cmdq, ptr);
+                       u8 tfdidx;
                        u32 caplen, cmdlen;
 
+                       if (trans->trans_cfg->use_tfh)
+                               tfdidx = idx;
+                       else
+                               tfdidx = ptr;
+
                        cmdlen = iwl_trans_pcie_get_cmdlen(trans,
-                                                          cmdq->tfds +
-                                                          tfd_size * ptr);
+                                                          (u8 *)cmdq->tfds +
+                                                          tfd_size * tfdidx);
                        caplen = min_t(u32, TFD_MAX_PAYLOAD_SIZE, cmdlen);
 
                        if (cmdlen) {
@@ -3450,6 +3456,15 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
        spin_lock_init(&trans_pcie->reg_lock);
        mutex_init(&trans_pcie->mutex);
        init_waitqueue_head(&trans_pcie->ucode_write_waitq);
+
+       trans_pcie->rba.alloc_wq = alloc_workqueue("rb_allocator",
+                                                  WQ_HIGHPRI | WQ_UNBOUND, 1);
+       if (!trans_pcie->rba.alloc_wq) {
+               ret = -ENOMEM;
+               goto out_free_trans;
+       }
+       INIT_WORK(&trans_pcie->rba.rx_alloc, iwl_pcie_rx_allocator_work);
+
        trans_pcie->tso_hdr_page = alloc_percpu(struct iwl_tso_hdr_page);
        if (!trans_pcie->tso_hdr_page) {
                ret = -ENOMEM;
@@ -3584,10 +3599,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
                trans_pcie->inta_mask = CSR_INI_SET_MASK;
         }
 
-       trans_pcie->rba.alloc_wq = alloc_workqueue("rb_allocator",
-                                                  WQ_HIGHPRI | WQ_UNBOUND, 1);
-       INIT_WORK(&trans_pcie->rba.rx_alloc, iwl_pcie_rx_allocator_work);
-
 #ifdef CONFIG_IWLWIFI_DEBUGFS
        trans_pcie->fw_mon_data.state = IWL_FW_MON_DBGFS_STATE_CLOSED;
        mutex_init(&trans_pcie->fw_mon_data.mutex);
@@ -3599,6 +3610,8 @@ out_free_ict:
        iwl_pcie_free_ict(trans);
 out_no_pci:
        free_percpu(trans_pcie->tso_hdr_page);
+       destroy_workqueue(trans_pcie->rba.alloc_wq);
+out_free_trans:
        iwl_trans_free(trans);
        return ERR_PTR(ret);
 }
index 158a3d762e55d6e31afd2c358c81f7d93f7264c6..e323e9a5999fd5a307ec8609044cb553db65682f 100644 (file)
@@ -3041,30 +3041,6 @@ static void prism2_clear_set_tim_queue(local_info_t *local)
        }
 }
 
-
-/*
- * HostAP uses two layers of net devices, where the inner
- * layer gets called all the time from the outer layer.
- * This is a natural nesting, which needs a split lock type.
- */
-static struct lock_class_key hostap_netdev_xmit_lock_key;
-static struct lock_class_key hostap_netdev_addr_lock_key;
-
-static void prism2_set_lockdep_class_one(struct net_device *dev,
-                                        struct netdev_queue *txq,
-                                        void *_unused)
-{
-       lockdep_set_class(&txq->_xmit_lock,
-                         &hostap_netdev_xmit_lock_key);
-}
-
-static void prism2_set_lockdep_class(struct net_device *dev)
-{
-       lockdep_set_class(&dev->addr_list_lock,
-                         &hostap_netdev_addr_lock_key);
-       netdev_for_each_tx_queue(dev, prism2_set_lockdep_class_one, NULL);
-}
-
 static struct net_device *
 prism2_init_local_data(struct prism2_helper_functions *funcs, int card_idx,
                       struct device *sdev)
@@ -3223,7 +3199,6 @@ while (0)
        if (ret >= 0)
                ret = register_netdevice(dev);
 
-       prism2_set_lockdep_class(dev);
        rtnl_unlock();
        if (ret < 0) {
                printk(KERN_WARNING "%s: register netdevice failed!\n",
index 635956024e8859308f0f42bf3670db2bb0abf614..14f562cd715c0b8d27eb0fb21a08ccecbc416a01 100644 (file)
@@ -1261,8 +1261,8 @@ static bool mac80211_hwsim_tx_frame_no_nl(struct ieee80211_hw *hw,
        skb_orphan(skb);
        skb_dst_drop(skb);
        skb->mark = 0;
-       secpath_reset(skb);
-       nf_reset(skb);
+       skb_ext_reset(skb);
+       nf_reset_ct(skb);
 
        /*
         * Get absolute mactime here so all HWs RX at the "same time", and
@@ -4026,7 +4026,7 @@ static int __init init_mac80211_hwsim(void)
        err = dev_alloc_name(hwsim_mon, hwsim_mon->name);
        if (err < 0) {
                rtnl_unlock();
-               goto out_free_radios;
+               goto out_free_mon;
        }
 
        err = register_netdevice(hwsim_mon);
index 4d03596e891f11c66ff6cb011d3f4a39830fff1b..d7a1ddc9e40742a718337e7e35201eb8f81c079f 100644 (file)
@@ -8,6 +8,8 @@ mt76-y := \
        mmio.o util.o trace.o dma.o mac80211.o debugfs.o eeprom.o \
        tx.o agg-rx.o mcu.o
 
+mt76-$(CONFIG_PCI) += pci.o
+
 mt76-usb-y := usb.o usb_trace.o
 
 CFLAGS_trace.o := -I$(src)
index c747eb24581c42683f93f6e285a660e2edd9daea..8f69d00bd940a98aaa84a0166c7375357ecab82a 100644 (file)
@@ -53,8 +53,10 @@ mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
        u32 ctrl;
        int i, idx = -1;
 
-       if (txwi)
+       if (txwi) {
                q->entry[q->head].txwi = DMA_DUMMY_DATA;
+               q->entry[q->head].skip_buf0 = true;
+       }
 
        for (i = 0; i < nbufs; i += 2, buf += 2) {
                u32 buf0 = buf[0].addr, buf1 = 0;
@@ -97,7 +99,7 @@ mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx,
        __le32 __ctrl = READ_ONCE(q->desc[idx].ctrl);
        u32 ctrl = le32_to_cpu(__ctrl);
 
-       if (!e->txwi || !e->skb) {
+       if (!e->skip_buf0) {
                __le32 addr = READ_ONCE(q->desc[idx].buf0);
                u32 len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctrl);
 
index 570c159515a09cae82d2e17cd74324cea5132cd5..8aec7ccf2d79b45ccbcd9388ab72e97e4c98bb2c 100644 (file)
@@ -93,8 +93,9 @@ struct mt76_queue_entry {
                struct urb *urb;
        };
        enum mt76_txq_id qid;
-       bool schedule;
-       bool done;
+       bool skip_buf0:1;
+       bool schedule:1;
+       bool done:1;
 };
 
 struct mt76_queue_regs {
@@ -578,6 +579,7 @@ bool __mt76_poll_msec(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
 #define mt76_poll_msec(dev, ...) __mt76_poll_msec(&((dev)->mt76), __VA_ARGS__)
 
 void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs);
+void mt76_pci_disable_aspm(struct pci_dev *pdev);
 
 static inline u16 mt76_chip(struct mt76_dev *dev)
 {
index 73c3104f88582bbdefca26fb44b86f765248642e..cf611d1b817c07dd5179c1dc2aa0182975904a3f 100644 (file)
@@ -81,6 +81,8 @@ mt76pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        /* RG_SSUSB_CDR_BR_PE1D = 0x3 */
        mt76_rmw_field(dev, 0x15c58, 0x3 << 6, 0x3);
 
+       mt76_pci_disable_aspm(pdev);
+
        return 0;
 
 error:
diff --git a/drivers/net/wireless/mediatek/mt76/pci.c b/drivers/net/wireless/mediatek/mt76/pci.c
new file mode 100644 (file)
index 0000000..04c5a69
--- /dev/null
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (C) 2019 Lorenzo Bianconi <lorenzo@kernel.org>
+ */
+
+#include <linux/pci.h>
+
+void mt76_pci_disable_aspm(struct pci_dev *pdev)
+{
+       struct pci_dev *parent = pdev->bus->self;
+       u16 aspm_conf, parent_aspm_conf = 0;
+
+       pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &aspm_conf);
+       aspm_conf &= PCI_EXP_LNKCTL_ASPMC;
+       if (parent) {
+               pcie_capability_read_word(parent, PCI_EXP_LNKCTL,
+                                         &parent_aspm_conf);
+               parent_aspm_conf &= PCI_EXP_LNKCTL_ASPMC;
+       }
+
+       if (!aspm_conf && (!parent || !parent_aspm_conf)) {
+               /* aspm already disabled */
+               return;
+       }
+
+       dev_info(&pdev->dev, "disabling ASPM %s %s\n",
+                (aspm_conf & PCI_EXP_LNKCTL_ASPM_L0S) ? "L0s" : "",
+                (aspm_conf & PCI_EXP_LNKCTL_ASPM_L1) ? "L1" : "");
+
+       if (IS_ENABLED(CONFIG_PCIEASPM)) {
+               int err;
+
+               err = pci_disable_link_state(pdev, aspm_conf);
+               if (!err)
+                       return;
+       }
+
+       /* both device and parent should have the same ASPM setting.
+        * disable ASPM in downstream component first and then upstream.
+        */
+       pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, aspm_conf);
+       if (parent)
+               pcie_capability_clear_word(parent, PCI_EXP_LNKCTL,
+                                          aspm_conf);
+}
+EXPORT_SYMBOL_GPL(mt76_pci_disable_aspm);
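
A minimal usage sketch, for context only: a PCI wireless driver would call this helper once from its probe path, after the device has been enabled. The probe function below and its error handling are hypothetical, not part of this patch.

static int example_pci_probe(struct pci_dev *pdev,
			     const struct pci_device_id *id)
{
	int ret;

	/* Enable the device (managed) before touching its config space. */
	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

	pci_set_master(pdev);

	/* Opt out of ASPM L0s/L1 on hardware known to misbehave with it. */
	mt76_pci_disable_aspm(pdev);

	return 0;
}
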
index 2b216edd0c7d2cbecb10ca1ec46834910538cbed..a90a518b40d368f9a3da741ea8efade3e5ef2791 100644 (file)
@@ -23,7 +23,6 @@
 #include <linux/leds.h>
 #include <linux/mutex.h>
 #include <linux/etherdevice.h>
-#include <linux/input-polldev.h>
 #include <linux/kfifo.h>
 #include <linux/hrtimer.h>
 #include <linux/average.h>
index 4d4e3888ef2080379509f8b12f5af9409a3b5c3e..f2395309ec006a60ecea426992cab9e50a86d75f 100644 (file)
@@ -555,7 +555,7 @@ static ssize_t rt2x00debug_write_restart_hw(struct file *file,
 {
        struct rt2x00debug_intf *intf = file->private_data;
        struct rt2x00_dev *rt2x00dev = intf->rt2x00dev;
-       static unsigned long last_reset;
+       static unsigned long last_reset = INITIAL_JIFFIES;
 
        if (!rt2x00_has_cap_restart_hw(rt2x00dev))
                return -EOPNOTSUPP;
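
The timestamp previously relied on the implicit zero initialisation of a static variable, but jiffies itself starts at INITIAL_JIFFIES (just short of wraparound) rather than 0, so the first rate-limit comparison after boot could spuriously reject a restart request. Initialising last_reset to INITIAL_JIFFIES keeps that first request within the intended behaviour. A sketch of the kind of check this initialisation serves, with a placeholder interval that is not taken from this driver:

	/* Hypothetical rate limit: refuse restarts requested too close together. */
	if (time_after(last_reset + msecs_to_jiffies(RESTART_INTERVAL_MS), jiffies))
		return -EBUSY;

	last_reset = jiffies;
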
index 6087ec7a90a6e6ad5d2ac1921beadd243780f866..f88d26535978d958b6b21eba521e446786b87b10 100644 (file)
@@ -822,7 +822,7 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
                hdr = rtl_get_hdr(skb);
                fc = rtl_get_fc(skb);
 
-               if (!stats.crc && !stats.hwerror) {
+               if (!stats.crc && !stats.hwerror && (skb->len > FCS_LEN)) {
                        memcpy(IEEE80211_SKB_RXCB(skb), &rx_status,
                               sizeof(rx_status));
 
@@ -859,6 +859,7 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
                                _rtl_pci_rx_to_mac80211(hw, skb, rx_status);
                        }
                } else {
+                       /* drop packets with errors or those too short */
                        dev_kfree_skb_any(skb);
                }
 new_trx_end:
index 70f04c2f5b176ffe8a8178fb02cbbffbdd4a2990..fff8dda14023861158acc10bea9f6df2b203d2f6 100644 (file)
@@ -754,6 +754,9 @@ static void rtl_p2p_noa_ie(struct ieee80211_hw *hw, void *data,
                                return;
                        } else {
                                noa_num = (noa_len - 2) / 13;
+                               if (noa_num > P2P_MAX_NOA_NUM)
+                                       noa_num = P2P_MAX_NOA_NUM;
+
                        }
                        noa_index = ie[3];
                        if (rtlpriv->psc.p2p_ps_info.p2p_ps_mode ==
@@ -848,6 +851,9 @@ static void rtl_p2p_action_ie(struct ieee80211_hw *hw, void *data,
                                return;
                        } else {
                                noa_num = (noa_len - 2) / 13;
+                               if (noa_num > P2P_MAX_NOA_NUM)
+                                       noa_num = P2P_MAX_NOA_NUM;
+
                        }
                        noa_index = ie[3];
                        if (rtlpriv->psc.p2p_ps_info.p2p_ps_mode ==
index be92e1220284c7d07d2b8143f5b8084e1d9375da..7997cc6de334f5222f038f3b058f036078aa41db 100644 (file)
@@ -548,6 +548,7 @@ static int virt_wifi_newlink(struct net *src_net, struct net_device *dev,
        priv->is_connected = false;
        priv->is_up = false;
        INIT_DELAYED_WORK(&priv->connect, virt_wifi_connect_complete);
+       __module_get(THIS_MODULE);
 
        return 0;
 unregister_netdev:
@@ -578,6 +579,7 @@ static void virt_wifi_dellink(struct net_device *dev,
        netdev_upper_dev_unlink(priv->lowerdev, dev);
 
        unregister_netdevice_queue(dev, head);
+       module_put(THIS_MODULE);
 
        /* Deleting the wiphy is handled in the module destructor. */
 }
@@ -590,6 +592,42 @@ static struct rtnl_link_ops virt_wifi_link_ops = {
        .priv_size      = sizeof(struct virt_wifi_netdev_priv),
 };
 
+static bool netif_is_virt_wifi_dev(const struct net_device *dev)
+{
+       return rcu_access_pointer(dev->rx_handler) == virt_wifi_rx_handler;
+}
+
+static int virt_wifi_event(struct notifier_block *this, unsigned long event,
+                          void *ptr)
+{
+       struct net_device *lower_dev = netdev_notifier_info_to_dev(ptr);
+       struct virt_wifi_netdev_priv *priv;
+       struct net_device *upper_dev;
+       LIST_HEAD(list_kill);
+
+       if (!netif_is_virt_wifi_dev(lower_dev))
+               return NOTIFY_DONE;
+
+       switch (event) {
+       case NETDEV_UNREGISTER:
+               priv = rtnl_dereference(lower_dev->rx_handler_data);
+               if (!priv)
+                       return NOTIFY_DONE;
+
+               upper_dev = priv->upperdev;
+
+               upper_dev->rtnl_link_ops->dellink(upper_dev, &list_kill);
+               unregister_netdevice_many(&list_kill);
+               break;
+       }
+
+       return NOTIFY_DONE;
+}
+
+static struct notifier_block virt_wifi_notifier = {
+       .notifier_call = virt_wifi_event,
+};
+
 /* Acquires and releases the rtnl lock. */
 static int __init virt_wifi_init_module(void)
 {
@@ -598,14 +636,25 @@ static int __init virt_wifi_init_module(void)
        /* Guaranteed to be locally-administered and not multicast. */
        eth_random_addr(fake_router_bssid);
 
+       err = register_netdevice_notifier(&virt_wifi_notifier);
+       if (err)
+               return err;
+
+       err = -ENOMEM;
        common_wiphy = virt_wifi_make_wiphy();
        if (!common_wiphy)
-               return -ENOMEM;
+               goto notifier;
 
        err = rtnl_link_register(&virt_wifi_link_ops);
        if (err)
-               virt_wifi_destroy_wiphy(common_wiphy);
+               goto destroy_wiphy;
 
+       return 0;
+
+destroy_wiphy:
+       virt_wifi_destroy_wiphy(common_wiphy);
+notifier:
+       unregister_netdevice_notifier(&virt_wifi_notifier);
        return err;
 }
 
@@ -615,6 +664,7 @@ static void __exit virt_wifi_cleanup_module(void)
        /* Will delete any devices that depend on the wiphy. */
        rtnl_link_unregister(&virt_wifi_link_ops);
        virt_wifi_destroy_wiphy(common_wiphy);
+       unregister_netdevice_notifier(&virt_wifi_notifier);
 }
 
 module_init(virt_wifi_init_module);
index 240f762b374953b94cf8855823e4f10d9ff91944..103ed00775eb4a608220067a54f09a909f255e71 100644 (file)
@@ -719,7 +719,6 @@ err_unmap:
        xenvif_unmap_frontend_data_rings(queue);
        netif_napi_del(&queue->napi);
 err:
-       module_put(THIS_MODULE);
        return err;
 }
 
index e14ec75b61d60776008b05d1bbd0b0a0d3055bbb..482c6c8b0fb7e0fb2476d52b41156b45903a46fe 100644 (file)
@@ -887,9 +887,9 @@ static int xennet_set_skb_gso(struct sk_buff *skb,
        return 0;
 }
 
-static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
-                                 struct sk_buff *skb,
-                                 struct sk_buff_head *list)
+static int xennet_fill_frags(struct netfront_queue *queue,
+                            struct sk_buff *skb,
+                            struct sk_buff_head *list)
 {
        RING_IDX cons = queue->rx.rsp_cons;
        struct sk_buff *nskb;
@@ -908,7 +908,7 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
                if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) {
                        queue->rx.rsp_cons = ++cons + skb_queue_len(list);
                        kfree_skb(nskb);
-                       return ~0U;
+                       return -ENOENT;
                }
 
                skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
@@ -919,7 +919,9 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
                kfree_skb(nskb);
        }
 
-       return cons;
+       queue->rx.rsp_cons = cons;
+
+       return 0;
 }
 
 static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
@@ -1045,8 +1047,7 @@ err:
                skb->data_len = rx->status;
                skb->len += rx->status;
 
-               i = xennet_fill_frags(queue, skb, &tmpq);
-               if (unlikely(i == ~0U))
+               if (unlikely(xennet_fill_frags(queue, skb, &tmpq)))
                        goto err;
 
                if (rx->flags & XEN_NETRXF_csum_blank)
@@ -1056,7 +1057,7 @@ err:
 
                __skb_queue_tail(&rxq, skb);
 
-               queue->rx.rsp_cons = ++i;
+               i = ++queue->rx.rsp_cons;
                work_done++;
        }
 
index c5289eaf17eef7f65a09cf478dc7b7e8d29592bd..e897e4d768ef7968032a1614fe3a207ed5190795 100644 (file)
@@ -547,18 +547,25 @@ static int pn533_usb_probe(struct usb_interface *interface,
 
        rc = pn533_finalize_setup(priv);
        if (rc)
-               goto error;
+               goto err_deregister;
 
        usb_set_intfdata(interface, phy);
 
        return 0;
 
+err_deregister:
+       pn533_unregister_device(phy->priv);
 error:
+       usb_kill_urb(phy->in_urb);
+       usb_kill_urb(phy->out_urb);
+       usb_kill_urb(phy->ack_urb);
+
        usb_free_urb(phy->in_urb);
        usb_free_urb(phy->out_urb);
        usb_free_urb(phy->ack_urb);
        usb_put_dev(phy->udev);
        kfree(in_buf);
+       kfree(phy->ack_buffer);
 
        return rc;
 }
index 108f60b46804a440c6acf07e336d763c4cad29f3..fa7ba09dca77199708b2729a6747ad9c96c9e84f 100644 (file)
@@ -102,10 +102,13 @@ static void nvme_set_queue_dying(struct nvme_ns *ns)
         */
        if (!ns->disk || test_and_set_bit(NVME_NS_DEAD, &ns->flags))
                return;
-       revalidate_disk(ns->disk);
        blk_set_queue_dying(ns->queue);
        /* Forcibly unquiesce queues to avoid blocking dispatch */
        blk_mq_unquiesce_queue(ns->queue);
+       /*
+        * Revalidate after unblocking dispatchers that may be holding bd_mutex
+        */
+       revalidate_disk(ns->disk);
 }
 
 static void nvme_queue_scan(struct nvme_ctrl *ctrl)
@@ -113,10 +116,26 @@ static void nvme_queue_scan(struct nvme_ctrl *ctrl)
        /*
         * Only queue new scan work when admin and IO queues are both alive
         */
-       if (ctrl->state == NVME_CTRL_LIVE)
+       if (ctrl->state == NVME_CTRL_LIVE && ctrl->tagset)
                queue_work(nvme_wq, &ctrl->scan_work);
 }
 
+/*
+ * Use this function to proceed with scheduling reset_work for a controller
+ * that had previously been set to the resetting state. This is intended for
+ * code paths that can't be interrupted by other reset attempts. A hot removal
+ * may prevent this from succeeding.
+ */
+int nvme_try_sched_reset(struct nvme_ctrl *ctrl)
+{
+       if (ctrl->state != NVME_CTRL_RESETTING)
+               return -EBUSY;
+       if (!queue_work(nvme_reset_wq, &ctrl->reset_work))
+               return -EBUSY;
+       return 0;
+}
+EXPORT_SYMBOL_GPL(nvme_try_sched_reset);
+
 int nvme_reset_ctrl(struct nvme_ctrl *ctrl)
 {
        if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
@@ -134,8 +153,7 @@ int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
        ret = nvme_reset_ctrl(ctrl);
        if (!ret) {
                flush_work(&ctrl->reset_work);
-               if (ctrl->state != NVME_CTRL_LIVE &&
-                   ctrl->state != NVME_CTRL_ADMIN_ONLY)
+               if (ctrl->state != NVME_CTRL_LIVE)
                        ret = -ENETRESET;
        }
 
@@ -312,15 +330,6 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
 
        old_state = ctrl->state;
        switch (new_state) {
-       case NVME_CTRL_ADMIN_ONLY:
-               switch (old_state) {
-               case NVME_CTRL_CONNECTING:
-                       changed = true;
-                       /* FALLTHRU */
-               default:
-                       break;
-               }
-               break;
        case NVME_CTRL_LIVE:
                switch (old_state) {
                case NVME_CTRL_NEW:
@@ -336,7 +345,6 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
                switch (old_state) {
                case NVME_CTRL_NEW:
                case NVME_CTRL_LIVE:
-               case NVME_CTRL_ADMIN_ONLY:
                        changed = true;
                        /* FALLTHRU */
                default:
@@ -356,7 +364,6 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
        case NVME_CTRL_DELETING:
                switch (old_state) {
                case NVME_CTRL_LIVE:
-               case NVME_CTRL_ADMIN_ONLY:
                case NVME_CTRL_RESETTING:
                case NVME_CTRL_CONNECTING:
                        changed = true;
@@ -378,8 +385,10 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
                break;
        }
 
-       if (changed)
+       if (changed) {
                ctrl->state = new_state;
+               wake_up_all(&ctrl->state_wq);
+       }
 
        spin_unlock_irqrestore(&ctrl->lock, flags);
        if (changed && ctrl->state == NVME_CTRL_LIVE)
@@ -388,6 +397,39 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
 }
 EXPORT_SYMBOL_GPL(nvme_change_ctrl_state);
 
+/*
+ * Returns true for sink states that can't ever transition back to live.
+ */
+static bool nvme_state_terminal(struct nvme_ctrl *ctrl)
+{
+       switch (ctrl->state) {
+       case NVME_CTRL_NEW:
+       case NVME_CTRL_LIVE:
+       case NVME_CTRL_RESETTING:
+       case NVME_CTRL_CONNECTING:
+               return false;
+       case NVME_CTRL_DELETING:
+       case NVME_CTRL_DEAD:
+               return true;
+       default:
+               WARN_ONCE(1, "Unhandled ctrl state:%d", ctrl->state);
+               return true;
+       }
+}
+
+/*
+ * Waits for the controller state to be resetting, or returns false if it is
+ * not possible to ever transition to that state.
+ */
+bool nvme_wait_reset(struct nvme_ctrl *ctrl)
+{
+       wait_event(ctrl->state_wq,
+                  nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING) ||
+                  nvme_state_terminal(ctrl));
+       return ctrl->state == NVME_CTRL_RESETTING;
+}
+EXPORT_SYMBOL_GPL(nvme_wait_reset);
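
A minimal sketch of the intended caller pattern, with transport-specific teardown elided (the PCI driver's nvme_disable_prepare_reset() further down in this series has the same shape):

static int example_prepare_reset(struct nvme_ctrl *ctrl)
{
	/* Give up if the controller is in a terminal state and can never reset. */
	if (!nvme_wait_reset(ctrl))
		return -EBUSY;

	/* Transport-specific quiesce/disable would go here. */
	return 0;
}
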
+
 static void nvme_free_ns_head(struct kref *ref)
 {
        struct nvme_ns_head *head =
@@ -847,7 +889,7 @@ out:
 static int nvme_submit_user_cmd(struct request_queue *q,
                struct nvme_command *cmd, void __user *ubuffer,
                unsigned bufflen, void __user *meta_buffer, unsigned meta_len,
-               u32 meta_seed, u32 *result, unsigned timeout)
+               u32 meta_seed, u64 *result, unsigned timeout)
 {
        bool write = nvme_is_write(cmd);
        struct nvme_ns *ns = q->queuedata;
@@ -888,7 +930,7 @@ static int nvme_submit_user_cmd(struct request_queue *q,
        else
                ret = nvme_req(req)->status;
        if (result)
-               *result = le32_to_cpu(nvme_req(req)->result.u32);
+               *result = le64_to_cpu(nvme_req(req)->result.u64);
        if (meta && !ret && !write) {
                if (copy_to_user(meta_buffer, meta, meta_len))
                        ret = -EFAULT;
@@ -1303,8 +1345,6 @@ static void nvme_update_formats(struct nvme_ctrl *ctrl)
                if (ns->disk && nvme_revalidate_disk(ns->disk))
                        nvme_set_queue_dying(ns);
        up_read(&ctrl->namespaces_rwsem);
-
-       nvme_remove_invalid_namespaces(ctrl, NVME_NSID_ALL);
 }
 
 static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects)
@@ -1320,6 +1360,7 @@ static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects)
                nvme_unfreeze(ctrl);
                nvme_mpath_unfreeze(ctrl->subsys);
                mutex_unlock(&ctrl->subsys->lock);
+               nvme_remove_invalid_namespaces(ctrl, NVME_NSID_ALL);
                mutex_unlock(&ctrl->scan_lock);
        }
        if (effects & NVME_CMD_EFFECTS_CCC)
@@ -1335,6 +1376,54 @@ static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
        struct nvme_command c;
        unsigned timeout = 0;
        u32 effects;
+       u64 result;
+       int status;
+
+       if (!capable(CAP_SYS_ADMIN))
+               return -EACCES;
+       if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
+               return -EFAULT;
+       if (cmd.flags)
+               return -EINVAL;
+
+       memset(&c, 0, sizeof(c));
+       c.common.opcode = cmd.opcode;
+       c.common.flags = cmd.flags;
+       c.common.nsid = cpu_to_le32(cmd.nsid);
+       c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
+       c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
+       c.common.cdw10 = cpu_to_le32(cmd.cdw10);
+       c.common.cdw11 = cpu_to_le32(cmd.cdw11);
+       c.common.cdw12 = cpu_to_le32(cmd.cdw12);
+       c.common.cdw13 = cpu_to_le32(cmd.cdw13);
+       c.common.cdw14 = cpu_to_le32(cmd.cdw14);
+       c.common.cdw15 = cpu_to_le32(cmd.cdw15);
+
+       if (cmd.timeout_ms)
+               timeout = msecs_to_jiffies(cmd.timeout_ms);
+
+       effects = nvme_passthru_start(ctrl, ns, cmd.opcode);
+       status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
+                       (void __user *)(uintptr_t)cmd.addr, cmd.data_len,
+                       (void __user *)(uintptr_t)cmd.metadata,
+                       cmd.metadata_len, 0, &result, timeout);
+       nvme_passthru_end(ctrl, effects);
+
+       if (status >= 0) {
+               if (put_user(result, &ucmd->result))
+                       return -EFAULT;
+       }
+
+       return status;
+}
+
+static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
+                       struct nvme_passthru_cmd64 __user *ucmd)
+{
+       struct nvme_passthru_cmd64 cmd;
+       struct nvme_command c;
+       unsigned timeout = 0;
+       u32 effects;
        int status;
 
        if (!capable(CAP_SYS_ADMIN))
@@ -1405,6 +1494,41 @@ static void nvme_put_ns_from_disk(struct nvme_ns_head *head, int idx)
                srcu_read_unlock(&head->srcu, idx);
 }
 
+static bool is_ctrl_ioctl(unsigned int cmd)
+{
+       if (cmd == NVME_IOCTL_ADMIN_CMD || cmd == NVME_IOCTL_ADMIN64_CMD)
+               return true;
+       if (is_sed_ioctl(cmd))
+               return true;
+       return false;
+}
+
+static int nvme_handle_ctrl_ioctl(struct nvme_ns *ns, unsigned int cmd,
+                                 void __user *argp,
+                                 struct nvme_ns_head *head,
+                                 int srcu_idx)
+{
+       struct nvme_ctrl *ctrl = ns->ctrl;
+       int ret;
+
+       nvme_get_ctrl(ns->ctrl);
+       nvme_put_ns_from_disk(head, srcu_idx);
+
+       switch (cmd) {
+       case NVME_IOCTL_ADMIN_CMD:
+               ret = nvme_user_cmd(ctrl, NULL, argp);
+               break;
+       case NVME_IOCTL_ADMIN64_CMD:
+               ret = nvme_user_cmd64(ctrl, NULL, argp);
+               break;
+       default:
+               ret = sed_ioctl(ctrl->opal_dev, cmd, argp);
+               break;
+       }
+       nvme_put_ctrl(ctrl);
+       return ret;
+}
+
 static int nvme_ioctl(struct block_device *bdev, fmode_t mode,
                unsigned int cmd, unsigned long arg)
 {
@@ -1422,20 +1546,8 @@ static int nvme_ioctl(struct block_device *bdev, fmode_t mode,
         * separately and drop the ns SRCU reference early.  This avoids a
         * deadlock when deleting namespaces using the passthrough interface.
         */
-       if (cmd == NVME_IOCTL_ADMIN_CMD || is_sed_ioctl(cmd)) {
-               struct nvme_ctrl *ctrl = ns->ctrl;
-
-               nvme_get_ctrl(ns->ctrl);
-               nvme_put_ns_from_disk(head, srcu_idx);
-
-               if (cmd == NVME_IOCTL_ADMIN_CMD)
-                       ret = nvme_user_cmd(ctrl, NULL, argp);
-               else
-                       ret = sed_ioctl(ctrl->opal_dev, cmd, argp);
-
-               nvme_put_ctrl(ctrl);
-               return ret;
-       }
+       if (is_ctrl_ioctl(cmd))
+               return nvme_handle_ctrl_ioctl(ns, cmd, argp, head, srcu_idx);
 
        switch (cmd) {
        case NVME_IOCTL_ID:
@@ -1448,6 +1560,9 @@ static int nvme_ioctl(struct block_device *bdev, fmode_t mode,
        case NVME_IOCTL_SUBMIT_IO:
                ret = nvme_submit_io(ns, argp);
                break;
+       case NVME_IOCTL_IO64_CMD:
+               ret = nvme_user_cmd64(ns->ctrl, ns, argp);
+               break;
        default:
                if (ns->ndev)
                        ret = nvme_nvm_ioctl(ns, cmd, arg);
@@ -2289,6 +2404,16 @@ static const struct nvme_core_quirk_entry core_quirks[] = {
                .vid = 0x14a4,
                .fr = "22301111",
                .quirks = NVME_QUIRK_SIMPLE_SUSPEND,
+       },
+       {
+               /*
+                * This Kingston E8FK11.T firmware version does not generate
+                * an interrupt after resume from suspend-to-idle
+                * https://bugzilla.kernel.org/show_bug.cgi?id=204887
+                */
+               .vid = 0x2646,
+               .fr = "E8FK11.T",
+               .quirks = NVME_QUIRK_SIMPLE_SUSPEND,
        }
 };
 
@@ -2540,8 +2665,9 @@ static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
                list_add_tail(&subsys->entry, &nvme_subsystems);
        }
 
-       if (sysfs_create_link(&subsys->dev.kobj, &ctrl->device->kobj,
-                       dev_name(ctrl->device))) {
+       ret = sysfs_create_link(&subsys->dev.kobj, &ctrl->device->kobj,
+                               dev_name(ctrl->device));
+       if (ret) {
                dev_err(ctrl->device,
                        "failed to create sysfs link from subsystem.\n");
                goto out_put_subsystem;
@@ -2786,7 +2912,6 @@ static int nvme_dev_open(struct inode *inode, struct file *file)
 
        switch (ctrl->state) {
        case NVME_CTRL_LIVE:
-       case NVME_CTRL_ADMIN_ONLY:
                break;
        default:
                return -EWOULDBLOCK;
@@ -2838,6 +2963,8 @@ static long nvme_dev_ioctl(struct file *file, unsigned int cmd,
        switch (cmd) {
        case NVME_IOCTL_ADMIN_CMD:
                return nvme_user_cmd(ctrl, NULL, argp);
+       case NVME_IOCTL_ADMIN64_CMD:
+               return nvme_user_cmd64(ctrl, NULL, argp);
        case NVME_IOCTL_IO_CMD:
                return nvme_dev_user_cmd(ctrl, argp);
        case NVME_IOCTL_RESET:
@@ -3045,6 +3172,8 @@ static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);
 
 nvme_show_int_function(cntlid);
 nvme_show_int_function(numa_node);
+nvme_show_int_function(queue_count);
+nvme_show_int_function(sqsize);
 
 static ssize_t nvme_sysfs_delete(struct device *dev,
                                struct device_attribute *attr, const char *buf,
@@ -3076,7 +3205,6 @@ static ssize_t nvme_sysfs_show_state(struct device *dev,
        static const char *const state_name[] = {
                [NVME_CTRL_NEW]         = "new",
                [NVME_CTRL_LIVE]        = "live",
-               [NVME_CTRL_ADMIN_ONLY]  = "only-admin",
                [NVME_CTRL_RESETTING]   = "resetting",
                [NVME_CTRL_CONNECTING]  = "connecting",
                [NVME_CTRL_DELETING]    = "deleting",
@@ -3125,6 +3253,8 @@ static struct attribute *nvme_dev_attrs[] = {
        &dev_attr_address.attr,
        &dev_attr_state.attr,
        &dev_attr_numa_node.attr,
+       &dev_attr_queue_count.attr,
+       &dev_attr_sqsize.attr,
        NULL
 };
 
@@ -3585,11 +3715,10 @@ static void nvme_scan_work(struct work_struct *work)
        struct nvme_id_ctrl *id;
        unsigned nn;
 
-       if (ctrl->state != NVME_CTRL_LIVE)
+       /* No tagset on a live ctrl means IO queues could not be created */
+       if (ctrl->state != NVME_CTRL_LIVE || !ctrl->tagset)
                return;
 
-       WARN_ON_ONCE(!ctrl->tagset);
-
        if (test_and_clear_bit(NVME_AER_NOTICE_NS_CHANGED, &ctrl->events)) {
                dev_info(ctrl->device, "rescanning namespaces.\n");
                nvme_clear_changed_ns_log(ctrl);
@@ -3750,13 +3879,13 @@ static void nvme_fw_act_work(struct work_struct *work)
                if (time_after(jiffies, fw_act_timeout)) {
                        dev_warn(ctrl->device,
                                "Fw activation timeout, reset controller\n");
-                       nvme_reset_ctrl(ctrl);
-                       break;
+                       nvme_try_sched_reset(ctrl);
+                       return;
                }
                msleep(100);
        }
 
-       if (ctrl->state != NVME_CTRL_LIVE)
+       if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_LIVE))
                return;
 
        nvme_start_queues(ctrl);
@@ -3776,7 +3905,13 @@ static void nvme_handle_aen_notice(struct nvme_ctrl *ctrl, u32 result)
                nvme_queue_scan(ctrl);
                break;
        case NVME_AER_NOTICE_FW_ACT_STARTING:
-               queue_work(nvme_wq, &ctrl->fw_act_work);
+               /*
+                * We are (ab)using the RESETTING state to prevent subsequent
+                * recovery actions from interfering with the controller's
+                * firmware activation.
+                */
+               if (nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
+                       queue_work(nvme_wq, &ctrl->fw_act_work);
                break;
 #ifdef CONFIG_NVME_MULTIPATH
        case NVME_AER_NOTICE_ANA:
@@ -3899,6 +4034,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
        INIT_WORK(&ctrl->async_event_work, nvme_async_event_work);
        INIT_WORK(&ctrl->fw_act_work, nvme_fw_act_work);
        INIT_WORK(&ctrl->delete_work, nvme_delete_ctrl_work);
+       init_waitqueue_head(&ctrl->state_wq);
 
        INIT_DELAYED_WORK(&ctrl->ka_work, nvme_keep_alive_work);
        memset(&ctrl->ka_cmd, 0, sizeof(ctrl->ka_cmd));
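
Taken together, the NVME_IOCTL_ADMIN64_CMD and NVME_IOCTL_IO64_CMD handlers above expose the full 64-bit command result to userspace. A hypothetical userspace sketch against the controller character device (the opcode, CNS value and buffer size are illustrative only):

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/nvme_ioctl.h>

static int example_identify_ctrl(int fd, void *buf)
{
	struct nvme_passthru_cmd64 cmd = {
		.opcode   = 0x06,	/* Identify */
		.addr     = (uint64_t)(uintptr_t)buf,
		.data_len = 4096,
		.cdw10    = 1,		/* CNS = 1: identify controller */
	};

	/* On success the driver copies the 64-bit command result into cmd.result. */
	return ioctl(fd, NVME_IOCTL_ADMIN64_CMD, &cmd);
}
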
index 93f08d77c89672d2717dc89a5ff2f314f3ff1fec..a0ec40ab62eeba697575da4bd02fdc9cdfd31409 100644 (file)
@@ -182,8 +182,7 @@ bool nvmf_ip_options_match(struct nvme_ctrl *ctrl,
 static inline bool nvmf_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
                bool queue_live)
 {
-       if (likely(ctrl->state == NVME_CTRL_LIVE ||
-                  ctrl->state == NVME_CTRL_ADMIN_ONLY))
+       if (likely(ctrl->state == NVME_CTRL_LIVE))
                return true;
        return __nvmf_check_ready(ctrl, rq, queue_live);
 }
index 30de7efef00357f5231ebdee06953aa341aac6fc..fc99a40c1ec4c0e6a561e5e791a5602393fad475 100644 (file)
@@ -522,14 +522,13 @@ static int nvme_update_ana_state(struct nvme_ctrl *ctrl,
        return 0;
 }
 
-static int nvme_read_ana_log(struct nvme_ctrl *ctrl, bool groups_only)
+static int nvme_read_ana_log(struct nvme_ctrl *ctrl)
 {
        u32 nr_change_groups = 0;
        int error;
 
        mutex_lock(&ctrl->ana_lock);
-       error = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_ANA,
-                       groups_only ? NVME_ANA_LOG_RGO : 0,
+       error = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_ANA, 0,
                        ctrl->ana_log_buf, ctrl->ana_log_size, 0);
        if (error) {
                dev_warn(ctrl->device, "Failed to get ANA log: %d\n", error);
@@ -565,7 +564,7 @@ static void nvme_ana_work(struct work_struct *work)
 {
        struct nvme_ctrl *ctrl = container_of(work, struct nvme_ctrl, ana_work);
 
-       nvme_read_ana_log(ctrl, false);
+       nvme_read_ana_log(ctrl);
 }
 
 static void nvme_anatt_timeout(struct timer_list *t)
@@ -715,7 +714,7 @@ int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
                goto out;
        }
 
-       error = nvme_read_ana_log(ctrl, true);
+       error = nvme_read_ana_log(ctrl);
        if (error)
                goto out_free_ana_log_buf;
        return 0;
index b5013c101b352845de6fa977b279b9571f470e61..22e8401352c22b19d397a779006ae0ffaa49f614 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/sed-opal.h>
 #include <linux/fault-inject.h>
 #include <linux/rcupdate.h>
+#include <linux/wait.h>
 
 #include <trace/events/block.h>
 
@@ -161,7 +162,6 @@ static inline u16 nvme_req_qid(struct request *req)
 enum nvme_ctrl_state {
        NVME_CTRL_NEW,
        NVME_CTRL_LIVE,
-       NVME_CTRL_ADMIN_ONLY,    /* Only admin queue live */
        NVME_CTRL_RESETTING,
        NVME_CTRL_CONNECTING,
        NVME_CTRL_DELETING,
@@ -199,6 +199,7 @@ struct nvme_ctrl {
        struct cdev cdev;
        struct work_struct reset_work;
        struct work_struct delete_work;
+       wait_queue_head_t state_wq;
 
        struct nvme_subsystem *subsys;
        struct list_head subsys_entry;
@@ -221,6 +222,7 @@ struct nvme_ctrl {
        u16 oacs;
        u16 nssa;
        u16 nr_streams;
+       u16 sqsize;
        u32 max_namespaces;
        atomic_t abort_limit;
        u8 vwc;
@@ -269,7 +271,6 @@ struct nvme_ctrl {
        u16 hmmaxd;
 
        /* Fabrics only */
-       u16 sqsize;
        u32 ioccsz;
        u32 iorcsz;
        u16 icdoff;
@@ -449,6 +450,7 @@ void nvme_complete_rq(struct request *req);
 bool nvme_cancel_request(struct request *req, void *data, bool reserved);
 bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
                enum nvme_ctrl_state new_state);
+bool nvme_wait_reset(struct nvme_ctrl *ctrl);
 int nvme_disable_ctrl(struct nvme_ctrl *ctrl);
 int nvme_enable_ctrl(struct nvme_ctrl *ctrl);
 int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl);
@@ -499,6 +501,7 @@ int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
 void nvme_stop_keep_alive(struct nvme_ctrl *ctrl);
 int nvme_reset_ctrl(struct nvme_ctrl *ctrl);
 int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl);
+int nvme_try_sched_reset(struct nvme_ctrl *ctrl);
 int nvme_delete_ctrl(struct nvme_ctrl *ctrl);
 
 int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp,
index c0808f9eb8ab694a5fc027d884b71d7f3cd5bd6e..869f462e6b6ea01c5951e94ad247a3f727b215fe 100644 (file)
@@ -773,7 +773,8 @@ static blk_status_t nvme_setup_prp_simple(struct nvme_dev *dev,
                struct bio_vec *bv)
 {
        struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
-       unsigned int first_prp_len = dev->ctrl.page_size - bv->bv_offset;
+       unsigned int offset = bv->bv_offset & (dev->ctrl.page_size - 1);
+       unsigned int first_prp_len = dev->ctrl.page_size - offset;
 
        iod->first_dma = dma_map_bvec(dev->dev, bv, rq_dma_dir(req), 0);
        if (dma_mapping_error(dev->dev, iod->first_dma))
@@ -2263,10 +2264,7 @@ static bool __nvme_disable_io_queues(struct nvme_dev *dev, u8 opcode)
        return true;
 }
 
-/*
- * return error value only when tagset allocation failed
- */
-static int nvme_dev_add(struct nvme_dev *dev)
+static void nvme_dev_add(struct nvme_dev *dev)
 {
        int ret;
 
@@ -2296,7 +2294,7 @@ static int nvme_dev_add(struct nvme_dev *dev)
                if (ret) {
                        dev_warn(dev->ctrl.device,
                                "IO queues tagset allocation failed %d\n", ret);
-                       return ret;
+                       return;
                }
                dev->ctrl.tagset = &dev->tagset;
        } else {
@@ -2307,7 +2305,6 @@ static int nvme_dev_add(struct nvme_dev *dev)
        }
 
        nvme_dbbuf_set(dev);
-       return 0;
 }
 
 static int nvme_pci_enable(struct nvme_dev *dev)
@@ -2467,6 +2464,14 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
        mutex_unlock(&dev->shutdown_lock);
 }
 
+static int nvme_disable_prepare_reset(struct nvme_dev *dev, bool shutdown)
+{
+       if (!nvme_wait_reset(&dev->ctrl))
+               return -EBUSY;
+       nvme_dev_disable(dev, shutdown);
+       return 0;
+}
+
 static int nvme_setup_prp_pools(struct nvme_dev *dev)
 {
        dev->prp_page_pool = dma_pool_create("prp list page", dev->dev,
@@ -2490,14 +2495,20 @@ static void nvme_release_prp_pools(struct nvme_dev *dev)
        dma_pool_destroy(dev->prp_small_pool);
 }
 
+static void nvme_free_tagset(struct nvme_dev *dev)
+{
+       if (dev->tagset.tags)
+               blk_mq_free_tag_set(&dev->tagset);
+       dev->ctrl.tagset = NULL;
+}
+
 static void nvme_pci_free_ctrl(struct nvme_ctrl *ctrl)
 {
        struct nvme_dev *dev = to_nvme_dev(ctrl);
 
        nvme_dbbuf_dma_free(dev);
        put_device(dev->dev);
-       if (dev->tagset.tags)
-               blk_mq_free_tag_set(&dev->tagset);
+       nvme_free_tagset(dev);
        if (dev->ctrl.admin_q)
                blk_put_queue(dev->ctrl.admin_q);
        kfree(dev->queues);
@@ -2508,6 +2519,11 @@ static void nvme_pci_free_ctrl(struct nvme_ctrl *ctrl)
 
 static void nvme_remove_dead_ctrl(struct nvme_dev *dev)
 {
+       /*
+        * Set state to deleting now to avoid blocking nvme_wait_reset(), which
+        * may be holding this pci_dev's device lock.
+        */
+       nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
        nvme_get_ctrl(&dev->ctrl);
        nvme_dev_disable(dev, false);
        nvme_kill_queues(&dev->ctrl);
@@ -2521,7 +2537,6 @@ static void nvme_reset_work(struct work_struct *work)
                container_of(work, struct nvme_dev, ctrl.reset_work);
        bool was_suspend = !!(dev->ctrl.ctrl_config & NVME_CC_SHN_NORMAL);
        int result;
-       enum nvme_ctrl_state new_state = NVME_CTRL_LIVE;
 
        if (WARN_ON(dev->ctrl.state != NVME_CTRL_RESETTING)) {
                result = -ENODEV;
@@ -2615,13 +2630,11 @@ static void nvme_reset_work(struct work_struct *work)
                dev_warn(dev->ctrl.device, "IO queues not created\n");
                nvme_kill_queues(&dev->ctrl);
                nvme_remove_namespaces(&dev->ctrl);
-               new_state = NVME_CTRL_ADMIN_ONLY;
+               nvme_free_tagset(dev);
        } else {
                nvme_start_queues(&dev->ctrl);
                nvme_wait_freeze(&dev->ctrl);
-               /* hit this only when allocate tagset fails */
-               if (nvme_dev_add(dev))
-                       new_state = NVME_CTRL_ADMIN_ONLY;
+               nvme_dev_add(dev);
                nvme_unfreeze(&dev->ctrl);
        }
 
@@ -2629,9 +2642,9 @@ static void nvme_reset_work(struct work_struct *work)
         * If only the admin queue is live, keep it for further investigation
         * or recovery.
         */
-       if (!nvme_change_ctrl_state(&dev->ctrl, new_state)) {
+       if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_LIVE)) {
                dev_warn(dev->ctrl.device,
-                       "failed to mark controller state %d\n", new_state);
+                       "failed to mark controller live state\n");
                result = -ENODEV;
                goto out;
        }
@@ -2672,7 +2685,7 @@ static int nvme_pci_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val)
 
 static int nvme_pci_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val)
 {
-       *val = readq(to_nvme_dev(ctrl)->bar + off);
+       *val = lo_hi_readq(to_nvme_dev(ctrl)->bar + off);
        return 0;
 }
 
@@ -2836,19 +2849,28 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 static void nvme_reset_prepare(struct pci_dev *pdev)
 {
        struct nvme_dev *dev = pci_get_drvdata(pdev);
-       nvme_dev_disable(dev, false);
+
+       /*
+        * We don't need to check the return value from waiting for the reset
+        * state as pci_dev device lock is held, making it impossible to race
+        * with ->remove().
+        */
+       nvme_disable_prepare_reset(dev, false);
+       nvme_sync_queues(&dev->ctrl);
 }
 
 static void nvme_reset_done(struct pci_dev *pdev)
 {
        struct nvme_dev *dev = pci_get_drvdata(pdev);
-       nvme_reset_ctrl_sync(&dev->ctrl);
+
+       if (!nvme_try_sched_reset(&dev->ctrl))
+               flush_work(&dev->ctrl.reset_work);
 }
 
 static void nvme_shutdown(struct pci_dev *pdev)
 {
        struct nvme_dev *dev = pci_get_drvdata(pdev);
-       nvme_dev_disable(dev, true);
+       nvme_disable_prepare_reset(dev, true);
 }
 
 /*
@@ -2901,7 +2923,7 @@ static int nvme_resume(struct device *dev)
 
        if (ndev->last_ps == U32_MAX ||
            nvme_set_power_state(ctrl, ndev->last_ps) != 0)
-               nvme_reset_ctrl(ctrl);
+               return nvme_try_sched_reset(&ndev->ctrl);
        return 0;
 }
 
@@ -2929,43 +2951,42 @@ static int nvme_suspend(struct device *dev)
         */
        if (pm_suspend_via_firmware() || !ctrl->npss ||
            !pcie_aspm_enabled(pdev) ||
-           (ndev->ctrl.quirks & NVME_QUIRK_SIMPLE_SUSPEND)) {
-               nvme_dev_disable(ndev, true);
-               return 0;
-       }
+           (ndev->ctrl.quirks & NVME_QUIRK_SIMPLE_SUSPEND))
+               return nvme_disable_prepare_reset(ndev, true);
 
        nvme_start_freeze(ctrl);
        nvme_wait_freeze(ctrl);
        nvme_sync_queues(ctrl);
 
-       if (ctrl->state != NVME_CTRL_LIVE &&
-           ctrl->state != NVME_CTRL_ADMIN_ONLY)
+       if (ctrl->state != NVME_CTRL_LIVE)
                goto unfreeze;
 
        ret = nvme_get_power_state(ctrl, &ndev->last_ps);
        if (ret < 0)
                goto unfreeze;
 
+       /*
+        * A saved state prevents pci pm from generically controlling the
+        * device's power. If we're using protocol specific settings, we don't
+        * want pci interfering.
+        */
+       pci_save_state(pdev);
+
        ret = nvme_set_power_state(ctrl, ctrl->npss);
        if (ret < 0)
                goto unfreeze;
 
        if (ret) {
+               /* discard the saved state */
+               pci_load_saved_state(pdev, NULL);
+
                /*
                 * Clearing npss forces a controller reset on resume. The
                 * correct value will be rediscovered then.
                 */
-               nvme_dev_disable(ndev, true);
+               ret = nvme_disable_prepare_reset(ndev, true);
                ctrl->npss = 0;
-               ret = 0;
-               goto unfreeze;
        }
-       /*
-        * A saved state prevents pci pm from generically controlling the
-        * device's power. If we're using protocol specific settings, we don't
-        * want pci interfering.
-        */
-       pci_save_state(pdev);
 unfreeze:
        nvme_unfreeze(ctrl);
        return ret;
@@ -2974,9 +2995,7 @@ unfreeze:
 static int nvme_simple_suspend(struct device *dev)
 {
        struct nvme_dev *ndev = pci_get_drvdata(to_pci_dev(dev));
-
-       nvme_dev_disable(ndev, true);
-       return 0;
+       return nvme_disable_prepare_reset(ndev, true);
 }
 
 static int nvme_simple_resume(struct device *dev)
@@ -2984,8 +3003,7 @@ static int nvme_simple_resume(struct device *dev)
        struct pci_dev *pdev = to_pci_dev(dev);
        struct nvme_dev *ndev = pci_get_drvdata(pdev);
 
-       nvme_reset_ctrl(&ndev->ctrl);
-       return 0;
+       return nvme_try_sched_reset(&ndev->ctrl);
 }
 
 static const struct dev_pm_ops nvme_dev_pm_ops = {
@@ -3090,6 +3108,9 @@ static const struct pci_device_id nvme_id_table[] = {
                .driver_data = NVME_QUIRK_LIGHTNVM, },
        { PCI_DEVICE(0x10ec, 0x5762),   /* ADATA SX6000LNP */
                .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, },
+       { PCI_DEVICE(0x1cc1, 0x8201),   /* ADATA SX8200PNP 512GB */
+               .driver_data = NVME_QUIRK_NO_DEEPEST_PS |
+                               NVME_QUIRK_IGNORE_DEV_SUBNQN, },
        { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
        { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001) },
        { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) },
index dfa07bb9dfeb51aa828036319da40b7e0d09114c..f19a28b4e997f473c40c56c0564645565b8c71e7 100644 (file)
@@ -427,7 +427,7 @@ static void nvme_rdma_destroy_queue_ib(struct nvme_rdma_queue *queue)
 static int nvme_rdma_get_max_fr_pages(struct ib_device *ibdev)
 {
        return min_t(u32, NVME_RDMA_MAX_SEGMENTS,
-                    ibdev->attrs.max_fast_reg_page_list_len);
+                    ibdev->attrs.max_fast_reg_page_list_len - 1);
 }
 
 static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue)
@@ -437,7 +437,7 @@ static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue)
        const int cq_factor = send_wr_factor + 1;       /* + RECV */
        int comp_vector, idx = nvme_rdma_queue_idx(queue);
        enum ib_poll_context poll_ctx;
-       int ret;
+       int ret, pages_per_mr;
 
        queue->device = nvme_rdma_find_get_device(queue->cm_id);
        if (!queue->device) {
@@ -479,10 +479,16 @@ static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue)
                goto out_destroy_qp;
        }
 
+       /*
+        * Currently we don't use SG_GAPS MRs, so if the first entry is
+        * misaligned we'll end up using two entries for a single data page,
+        * so one additional entry is required.
+        */
+       pages_per_mr = nvme_rdma_get_max_fr_pages(ibdev) + 1;
        ret = ib_mr_pool_init(queue->qp, &queue->qp->rdma_mrs,
                              queue->queue_size,
                              IB_MR_TYPE_MEM_REG,
-                             nvme_rdma_get_max_fr_pages(ibdev), 0);
+                             pages_per_mr, 0);
        if (ret) {
                dev_err(queue->ctrl->ctrl.device,
                        "failed to initialize MR pool sized %d for QID %d\n",
@@ -614,7 +620,8 @@ static int nvme_rdma_start_queue(struct nvme_rdma_ctrl *ctrl, int idx)
        if (!ret) {
                set_bit(NVME_RDMA_Q_LIVE, &queue->flags);
        } else {
-               __nvme_rdma_stop_queue(queue);
+               if (test_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags))
+                       __nvme_rdma_stop_queue(queue);
                dev_info(ctrl->ctrl.device,
                        "failed to connect queue: %d ret=%d\n", idx, ret);
        }
@@ -820,8 +827,8 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
        if (error)
                goto out_stop_queue;
 
-       ctrl->ctrl.max_hw_sectors =
-               (ctrl->max_fr_pages - 1) << (ilog2(SZ_4K) - 9);
+       ctrl->ctrl.max_segments = ctrl->max_fr_pages;
+       ctrl->ctrl.max_hw_sectors = ctrl->max_fr_pages << (ilog2(SZ_4K) - 9);
 
        blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
 
@@ -1694,6 +1701,14 @@ nvme_rdma_timeout(struct request *rq, bool reserved)
        dev_warn(ctrl->ctrl.device, "I/O %d QID %d timeout\n",
                 rq->tag, nvme_rdma_queue_idx(queue));
 
+       /*
+        * Restart the timer if a controller reset is already scheduled. Any
+        * timed out commands would be handled before entering the connecting
+        * state.
+        */
+       if (ctrl->ctrl.state == NVME_CTRL_RESETTING)
+               return BLK_EH_RESET_TIMER;
+
        if (ctrl->ctrl.state != NVME_CTRL_LIVE) {
                /*
                 * Teardown immediately if controller times out while starting
index 4ffd5957637a5380424bfd1c8773104b3b2d3e47..7544be84ab3582e27ded19298b45960ce3460d96 100644 (file)
@@ -1042,7 +1042,7 @@ static void nvme_tcp_io_work(struct work_struct *w)
 {
        struct nvme_tcp_queue *queue =
                container_of(w, struct nvme_tcp_queue, io_work);
-       unsigned long start = jiffies + msecs_to_jiffies(1);
+       unsigned long deadline = jiffies + msecs_to_jiffies(1);
 
        do {
                bool pending = false;
@@ -1067,7 +1067,7 @@ static void nvme_tcp_io_work(struct work_struct *w)
                if (!pending)
                        return;
 
-       } while (time_after(jiffies, start)); /* quota is exhausted */
+       } while (!time_after(jiffies, deadline)); /* quota is exhausted */
 
        queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
 }
@@ -1386,7 +1386,9 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl,
        queue->sock->sk->sk_data_ready = nvme_tcp_data_ready;
        queue->sock->sk->sk_state_change = nvme_tcp_state_change;
        queue->sock->sk->sk_write_space = nvme_tcp_write_space;
+#ifdef CONFIG_NET_RX_BUSY_POLL
        queue->sock->sk->sk_ll_usec = 1;
+#endif
        write_unlock_bh(&queue->sock->sk->sk_callback_lock);
 
        return 0;
@@ -2044,6 +2046,14 @@ nvme_tcp_timeout(struct request *rq, bool reserved)
        struct nvme_tcp_ctrl *ctrl = req->queue->ctrl;
        struct nvme_tcp_cmd_pdu *pdu = req->pdu;
 
+       /*
+        * Restart the timer if a controller reset is already scheduled. Any
+        * timed out commands would be handled before entering the connecting
+        * state.
+        */
+       if (ctrl->ctrl.state == NVME_CTRL_RESETTING)
+               return BLK_EH_RESET_TIMER;
+
        dev_warn(ctrl->ctrl.device,
                "queue %d: timeout request %#x type %d\n",
                nvme_tcp_queue_id(req->queue), rq->tag, pdu->hdr.type);
@@ -2126,6 +2136,7 @@ static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
 
        ret = nvme_tcp_map_data(queue, rq);
        if (unlikely(ret)) {
+               nvme_cleanup_cmd(rq);
                dev_err(queue->ctrl->ctrl.device,
                        "Failed to map data (%d)\n", ret);
                return ret;
@@ -2208,7 +2219,7 @@ static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx)
        struct nvme_tcp_queue *queue = hctx->driver_data;
        struct sock *sk = queue->sock->sk;
 
-       if (sk_can_busy_loop(sk) && skb_queue_empty(&sk->sk_receive_queue))
+       if (sk_can_busy_loop(sk) && skb_queue_empty_lockless(&sk->sk_receive_queue))
                sk_busy_loop(sk, true);
        nvme_tcp_try_recv(queue);
        return queue->nr_cqe;
index de0bff70ebb68b963880724d6bdc052a14cb6ea5..32008d85172bc765122d84b3142e2e93397b1b7d 100644 (file)
 void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id)
 {
        const struct queue_limits *ql = &bdev_get_queue(bdev)->limits;
-       /* Number of physical blocks per logical block. */
-       const u32 ppl = ql->physical_block_size / ql->logical_block_size;
-       /* Physical blocks per logical block, 0's based. */
-       const __le16 ppl0b = to0based(ppl);
+       /* Number of logical blocks per physical block. */
+       const u32 lpp = ql->physical_block_size / ql->logical_block_size;
+       /* Logical blocks per physical block, 0's based. */
+       const __le16 lpp0b = to0based(lpp);
 
        /*
         * For NVMe 1.2 and later, bit 1 indicates that the fields NAWUN,
@@ -25,9 +25,9 @@ void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id)
         * field from the identify controller data structure should be used.
         */
        id->nsfeat |= 1 << 1;
-       id->nawun = ppl0b;
-       id->nawupf = ppl0b;
-       id->nacwu = ppl0b;
+       id->nawun = lpp0b;
+       id->nawupf = lpp0b;
+       id->nacwu = lpp0b;
 
        /*
         * Bit 4 indicates that the fields NPWG, NPWA, NPDG, NPDA, and
@@ -36,7 +36,7 @@ void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id)
         */
        id->nsfeat |= 1 << 4;
        /* NPWG = Namespace Preferred Write Granularity. 0's based */
-       id->npwg = ppl0b;
+       id->npwg = lpp0b;
        /* NPWA = Namespace Preferred Write Alignment. 0's based */
        id->npwa = id->npwg;
        /* NPDG = Namespace Preferred Deallocate Granularity. 0's based */
index 748a39fca771408717d18dc1c61e4723d62c6078..11f5aea97d1b1f704d30897d6dc3e2f42d86a0a0 100644 (file)
@@ -157,8 +157,10 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
                iod->sg_table.sgl = iod->first_sgl;
                if (sg_alloc_table_chained(&iod->sg_table,
                                blk_rq_nr_phys_segments(req),
-                               iod->sg_table.sgl, SG_CHUNK_SIZE))
+                               iod->sg_table.sgl, SG_CHUNK_SIZE)) {
+                       nvme_cleanup_cmd(req);
                        return BLK_STS_RESOURCE;
+               }
 
                iod->req.sg = iod->sg_table.sgl;
                iod->req.sg_cnt = blk_rq_map_sg(req->q, req, iod->sg_table.sgl);
index bf4f03474e899246dd560f68ea02cdd5107cd3af..d535080b781f95b0d66b97cad3e822d2782fd772 100644 (file)
@@ -348,8 +348,7 @@ static int nvmet_tcp_map_data(struct nvmet_tcp_cmd *cmd)
 
        return 0;
 err:
-       if (cmd->req.sg_cnt)
-               sgl_free(cmd->req.sg);
+       sgl_free(cmd->req.sg);
        return NVME_SC_INTERNAL;
 }
 
@@ -554,8 +553,7 @@ static int nvmet_try_send_data(struct nvmet_tcp_cmd *cmd)
 
        if (queue->nvme_sq.sqhd_disabled) {
                kfree(cmd->iov);
-               if (cmd->req.sg_cnt)
-                       sgl_free(cmd->req.sg);
+               sgl_free(cmd->req.sg);
        }
 
        return 1;
@@ -586,8 +584,7 @@ static int nvmet_try_send_response(struct nvmet_tcp_cmd *cmd,
                return -EAGAIN;
 
        kfree(cmd->iov);
-       if (cmd->req.sg_cnt)
-               sgl_free(cmd->req.sg);
+       sgl_free(cmd->req.sg);
        cmd->queue->snd_cmd = NULL;
        nvmet_tcp_put_cmd(cmd);
        return 1;
@@ -1310,8 +1307,7 @@ static void nvmet_tcp_finish_cmd(struct nvmet_tcp_cmd *cmd)
        nvmet_req_uninit(&cmd->req);
        nvmet_tcp_unmap_pdu_iovec(cmd);
        kfree(cmd->iov);
-       if (cmd->req.sg_cnt)
-               sgl_free(cmd->req.sg);
+       sgl_free(cmd->req.sg);
 }
 
 static void nvmet_tcp_uninit_data_in_cmds(struct nvmet_tcp_queue *queue)
index 7989703b883ce64ebe9fa8f58a7c718b6dd10db1..6bd610ee2cd734bf91d61a35c7ddb12df4cec0ba 100644 (file)
@@ -324,8 +324,10 @@ int of_reserved_mem_device_init_by_idx(struct device *dev,
        if (!target)
                return -ENODEV;
 
-       if (!of_device_is_available(target))
+       if (!of_device_is_available(target)) {
+               of_node_put(target);
                return 0;
+       }
 
        rmem = __find_rmem(target);
        of_node_put(target);
index 480a21e2ed39c738cbd1f66945ccdd3c2e0211b7..92e895d8645844a6a87a453259a5819541becd2c 100644 (file)
@@ -1207,6 +1207,7 @@ static int __init unittest_data_add(void)
        of_fdt_unflatten_tree(unittest_data, NULL, &unittest_data_node);
        if (!unittest_data_node) {
                pr_warn("%s: No tree to attach; not running tests\n", __func__);
+               kfree(unittest_data);
                return -ENODATA;
        }
 
index 3b7ffd0234e9c7c44a1bcbd9b924a1879b9e4207..9ff0538ee83a015d68af4425547a9998656d28eb 100644 (file)
@@ -1626,12 +1626,6 @@ struct opp_table *dev_pm_opp_set_regulators(struct device *dev,
                        goto free_regulators;
                }
 
-               ret = regulator_enable(reg);
-               if (ret < 0) {
-                       regulator_put(reg);
-                       goto free_regulators;
-               }
-
                opp_table->regulators[i] = reg;
        }
 
@@ -1645,10 +1639,8 @@ struct opp_table *dev_pm_opp_set_regulators(struct device *dev,
        return opp_table;
 
 free_regulators:
-       while (i--) {
-               regulator_disable(opp_table->regulators[i]);
-               regulator_put(opp_table->regulators[i]);
-       }
+       while (i != 0)
+               regulator_put(opp_table->regulators[--i]);
 
        kfree(opp_table->regulators);
        opp_table->regulators = NULL;
@@ -1674,10 +1666,8 @@ void dev_pm_opp_put_regulators(struct opp_table *opp_table)
        /* Make sure there are no concurrent readers while updating opp_table */
        WARN_ON(!list_empty(&opp_table->opp_list));
 
-       for (i = opp_table->regulator_count - 1; i >= 0; i--) {
-               regulator_disable(opp_table->regulators[i]);
+       for (i = opp_table->regulator_count - 1; i >= 0; i--)
                regulator_put(opp_table->regulators[i]);
-       }
 
        _free_set_opp_data(opp_table);
 
index 1813f5ad5fa23b4f4a0f81ef16c7aab159320e96..1cbb58240b8016383d9f1e9137b7190cff345935 100644 (file)
@@ -77,8 +77,6 @@ static struct dev_pm_opp *_find_opp_of_np(struct opp_table *opp_table,
 {
        struct dev_pm_opp *opp;
 
-       lockdep_assert_held(&opp_table_lock);
-
        mutex_lock(&opp_table->lock);
 
        list_for_each_entry(opp, &opp_table->opp_list, node) {
@@ -665,6 +663,13 @@ static int _of_add_opp_table_v2(struct device *dev, struct opp_table *opp_table)
                return 0;
        }
 
+       /*
+        * Re-initialize list_kref every time we add static OPPs to the OPP
+        * table as the reference count may be 0 after the last time static OPPs
+        * were removed.
+        */
+       kref_init(&opp_table->list_kref);
+
        /* We have opp-table node now, iterate over it and add OPPs */
        for_each_available_child_of_node(opp_table->np, np) {
                opp = _opp_add_static_v2(opp_table, dev, np);
index ed50502cc65ad29836f5aa47253a0e974ae22471..de8e4e347249179a7cd3085f6a3c6f49e8ffd369 100644 (file)
@@ -678,14 +678,6 @@ static int sba_dma_supported( struct device *dev, u64 mask)
                return(0);
        }
 
-       /* Documentation/DMA-API-HOWTO.txt tells drivers to try 64-bit
-        * first, then fall back to 32-bit if that fails.
-        * We are just "encouraging" 32-bit DMA masks here since we can
-        * never allow IOMMU bypass unless we add special support for ZX1.
-        */
-       if (mask > ~0U)
-               return 0;
-
        ioc = GET_IOC(dev);
        if (!ioc)
                return 0;
index e7982af9a5d86efd738640fbe08dba01e4704155..a97e2571a5273094f6cff5a7f77f8c541a89b411 100644 (file)
@@ -958,19 +958,6 @@ void pci_refresh_power_state(struct pci_dev *dev)
        pci_update_current_state(dev, dev->current_state);
 }
 
-/**
- * pci_power_up - Put the given device into D0 forcibly
- * @dev: PCI device to power up
- */
-void pci_power_up(struct pci_dev *dev)
-{
-       if (platform_pci_power_manageable(dev))
-               platform_pci_set_power_state(dev, PCI_D0);
-
-       pci_raw_set_power_state(dev, PCI_D0);
-       pci_update_current_state(dev, PCI_D0);
-}
-
 /**
  * pci_platform_power_transition - Use platform to change device power state
  * @dev: PCI device to handle.
@@ -1153,6 +1140,17 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
 }
 EXPORT_SYMBOL(pci_set_power_state);
 
+/**
+ * pci_power_up - Put the given device into D0 forcibly
+ * @dev: PCI device to power up
+ */
+void pci_power_up(struct pci_dev *dev)
+{
+       __pci_start_power_transition(dev, PCI_D0);
+       pci_raw_set_power_state(dev, PCI_D0);
+       pci_update_current_state(dev, PCI_D0);
+}
+
 /**
  * pci_choose_state - Choose the power state of a PCI device
  * @dev: PCI device to be suspended
index 648ddb7f038a3e866c240c64a1636defeba46c60..c6800d220920ecb3613789ac094d8105ae0cdd94 100644 (file)
@@ -87,7 +87,7 @@ FUNC_GROUP_DECL(MACLINK3, L23);
 
 #define K25 7
 SIG_EXPR_LIST_DECL_SESG(K25, MACLINK4, MACLINK4, SIG_DESC_SET(SCU410, 7));
-SIG_EXPR_LIST_DECL_SESG(K25, SDA14, SDA14, SIG_DESC_SET(SCU4B0, 7));
+SIG_EXPR_LIST_DECL_SESG(K25, SDA14, I2C14, SIG_DESC_SET(SCU4B0, 7));
 PIN_DECL_2(K25, GPIOA7, MACLINK4, SDA14);
 FUNC_GROUP_DECL(MACLINK4, K25);
 
@@ -1262,13 +1262,13 @@ GROUP_DECL(SPI1, AB11, AC11, AA11);
 #define AD11 206
 SIG_EXPR_LIST_DECL_SEMG(AD11, SPI1DQ2, QSPI1, SPI1, SIG_DESC_SET(SCU438, 14));
 SIG_EXPR_LIST_DECL_SEMG(AD11, TXD13, UART13G1, UART13,
-                       SIG_DESC_SET(SCU438, 14));
+                       SIG_DESC_CLEAR(SCU4B8, 2), SIG_DESC_SET(SCU4D8, 14));
 PIN_DECL_2(AD11, GPIOZ6, SPI1DQ2, TXD13);
 
 #define AF10 207
 SIG_EXPR_LIST_DECL_SEMG(AF10, SPI1DQ3, QSPI1, SPI1, SIG_DESC_SET(SCU438, 15));
 SIG_EXPR_LIST_DECL_SEMG(AF10, RXD13, UART13G1, UART13,
-                       SIG_DESC_SET(SCU438, 15));
+                       SIG_DESC_CLEAR(SCU4B8, 3), SIG_DESC_SET(SCU4D8, 15));
 PIN_DECL_2(AF10, GPIOZ7, SPI1DQ3, RXD13);
 
 GROUP_DECL(QSPI1, AB11, AC11, AA11, AD11, AF10);
@@ -1440,91 +1440,85 @@ FUNC_GROUP_DECL(RGMII2, D4, C2, C1, D3, E4, F5, D2, E3, D1, F4, E2, E1);
 FUNC_GROUP_DECL(RMII2, D4, C2, C1, D3, D2, D1, F4, E2, E1);
 
 #define AB4 232
-SIG_EXPR_LIST_DECL_SESG(AB4, SD3CLK, SD3, SIG_DESC_SET(SCU400, 24));
-PIN_DECL_1(AB4, GPIO18D0, SD3CLK);
+SIG_EXPR_LIST_DECL_SEMG(AB4, EMMCCLK, EMMCG1, EMMC, SIG_DESC_SET(SCU400, 24));
+PIN_DECL_1(AB4, GPIO18D0, EMMCCLK);
 
 #define AA4 233
-SIG_EXPR_LIST_DECL_SESG(AA4, SD3CMD, SD3, SIG_DESC_SET(SCU400, 25));
-PIN_DECL_1(AA4, GPIO18D1, SD3CMD);
+SIG_EXPR_LIST_DECL_SEMG(AA4, EMMCCMD, EMMCG1, EMMC, SIG_DESC_SET(SCU400, 25));
+PIN_DECL_1(AA4, GPIO18D1, EMMCCMD);
 
 #define AC4 234
-SIG_EXPR_LIST_DECL_SESG(AC4, SD3DAT0, SD3, SIG_DESC_SET(SCU400, 26));
-PIN_DECL_1(AC4, GPIO18D2, SD3DAT0);
+SIG_EXPR_LIST_DECL_SEMG(AC4, EMMCDAT0, EMMCG1, EMMC, SIG_DESC_SET(SCU400, 26));
+PIN_DECL_1(AC4, GPIO18D2, EMMCDAT0);
 
 #define AA5 235
-SIG_EXPR_LIST_DECL_SESG(AA5, SD3DAT1, SD3, SIG_DESC_SET(SCU400, 27));
-PIN_DECL_1(AA5, GPIO18D3, SD3DAT1);
+SIG_EXPR_LIST_DECL_SEMG(AA5, EMMCDAT1, EMMCG4, EMMC, SIG_DESC_SET(SCU400, 27));
+PIN_DECL_1(AA5, GPIO18D3, EMMCDAT1);
 
 #define Y5 236
-SIG_EXPR_LIST_DECL_SESG(Y5, SD3DAT2, SD3, SIG_DESC_SET(SCU400, 28));
-PIN_DECL_1(Y5, GPIO18D4, SD3DAT2);
+SIG_EXPR_LIST_DECL_SEMG(Y5, EMMCDAT2, EMMCG4, EMMC, SIG_DESC_SET(SCU400, 28));
+PIN_DECL_1(Y5, GPIO18D4, EMMCDAT2);
 
 #define AB5 237
-SIG_EXPR_LIST_DECL_SESG(AB5, SD3DAT3, SD3, SIG_DESC_SET(SCU400, 29));
-PIN_DECL_1(AB5, GPIO18D5, SD3DAT3);
+SIG_EXPR_LIST_DECL_SEMG(AB5, EMMCDAT3, EMMCG4, EMMC, SIG_DESC_SET(SCU400, 29));
+PIN_DECL_1(AB5, GPIO18D5, EMMCDAT3);
 
 #define AB6 238
-SIG_EXPR_LIST_DECL_SESG(AB6, SD3CD, SD3, SIG_DESC_SET(SCU400, 30));
-PIN_DECL_1(AB6, GPIO18D6, SD3CD);
+SIG_EXPR_LIST_DECL_SEMG(AB6, EMMCCD, EMMCG1, EMMC, SIG_DESC_SET(SCU400, 30));
+PIN_DECL_1(AB6, GPIO18D6, EMMCCD);
 
 #define AC5 239
-SIG_EXPR_LIST_DECL_SESG(AC5, SD3WP, SD3, SIG_DESC_SET(SCU400, 31));
-PIN_DECL_1(AC5, GPIO18D7, SD3WP);
+SIG_EXPR_LIST_DECL_SEMG(AC5, EMMCWP, EMMCG1, EMMC, SIG_DESC_SET(SCU400, 31));
+PIN_DECL_1(AC5, GPIO18D7, EMMCWP);
 
-FUNC_GROUP_DECL(SD3, AB4, AA4, AC4, AA5, Y5, AB5, AB6, AC5);
+GROUP_DECL(EMMCG1, AB4, AA4, AC4, AB6, AC5);
+GROUP_DECL(EMMCG4, AB4, AA4, AC4, AA5, Y5, AB5, AB6, AC5);
 
 #define Y1 240
 SIG_EXPR_LIST_DECL_SEMG(Y1, FWSPIDCS, FWSPID, FWSPID, SIG_DESC_SET(SCU500, 3));
 SIG_EXPR_LIST_DECL_SESG(Y1, VBCS, VB, SIG_DESC_SET(SCU500, 5));
-SIG_EXPR_LIST_DECL_SESG(Y1, SD3DAT4, SD3DAT4, SIG_DESC_SET(SCU404, 0));
-PIN_DECL_3(Y1, GPIO18E0, FWSPIDCS, VBCS, SD3DAT4);
-FUNC_GROUP_DECL(SD3DAT4, Y1);
+SIG_EXPR_LIST_DECL_SEMG(Y1, EMMCDAT4, EMMCG8, EMMC, SIG_DESC_SET(SCU404, 0));
+PIN_DECL_3(Y1, GPIO18E0, FWSPIDCS, VBCS, EMMCDAT4);
 
 #define Y2 241
 SIG_EXPR_LIST_DECL_SEMG(Y2, FWSPIDCK, FWSPID, FWSPID, SIG_DESC_SET(SCU500, 3));
 SIG_EXPR_LIST_DECL_SESG(Y2, VBCK, VB, SIG_DESC_SET(SCU500, 5));
-SIG_EXPR_LIST_DECL_SESG(Y2, SD3DAT5, SD3DAT5, SIG_DESC_SET(SCU404, 1));
-PIN_DECL_3(Y2, GPIO18E1, FWSPIDCK, VBCK, SD3DAT5);
-FUNC_GROUP_DECL(SD3DAT5, Y2);
+SIG_EXPR_LIST_DECL_SEMG(Y2, EMMCDAT5, EMMCG8, EMMC, SIG_DESC_SET(SCU404, 1));
+PIN_DECL_3(Y2, GPIO18E1, FWSPIDCK, VBCK, EMMCDAT5);
 
 #define Y3 242
 SIG_EXPR_LIST_DECL_SEMG(Y3, FWSPIDMOSI, FWSPID, FWSPID,
                        SIG_DESC_SET(SCU500, 3));
 SIG_EXPR_LIST_DECL_SESG(Y3, VBMOSI, VB, SIG_DESC_SET(SCU500, 5));
-SIG_EXPR_LIST_DECL_SESG(Y3, SD3DAT6, SD3DAT6, SIG_DESC_SET(SCU404, 2));
-PIN_DECL_3(Y3, GPIO18E2, FWSPIDMOSI, VBMOSI, SD3DAT6);
-FUNC_GROUP_DECL(SD3DAT6, Y3);
+SIG_EXPR_LIST_DECL_SEMG(Y3, EMMCDAT6, EMMCG8, EMMC, SIG_DESC_SET(SCU404, 2));
+PIN_DECL_3(Y3, GPIO18E2, FWSPIDMOSI, VBMOSI, EMMCDAT6);
 
 #define Y4 243
 SIG_EXPR_LIST_DECL_SEMG(Y4, FWSPIDMISO, FWSPID, FWSPID,
                        SIG_DESC_SET(SCU500, 3));
 SIG_EXPR_LIST_DECL_SESG(Y4, VBMISO, VB, SIG_DESC_SET(SCU500, 5));
-SIG_EXPR_LIST_DECL_SESG(Y4, SD3DAT7, SD3DAT7, SIG_DESC_SET(SCU404, 3));
-PIN_DECL_3(Y4, GPIO18E3, FWSPIDMISO, VBMISO, SD3DAT7);
-FUNC_GROUP_DECL(SD3DAT7, Y4);
+SIG_EXPR_LIST_DECL_SEMG(Y4, EMMCDAT7, EMMCG8, EMMC, SIG_DESC_SET(SCU404, 3));
+PIN_DECL_3(Y4, GPIO18E3, FWSPIDMISO, VBMISO, EMMCDAT7);
 
 GROUP_DECL(FWSPID, Y1, Y2, Y3, Y4);
 GROUP_DECL(FWQSPID, Y1, Y2, Y3, Y4, AE12, AF12);
+GROUP_DECL(EMMCG8, AB4, AA4, AC4, AA5, Y5, AB5, AB6, AC5, Y1, Y2, Y3, Y4);
 FUNC_DECL_2(FWSPID, FWSPID, FWQSPID);
 FUNC_GROUP_DECL(VB, Y1, Y2, Y3, Y4);
-
+FUNC_DECL_3(EMMC, EMMCG1, EMMCG4, EMMCG8);
 /*
  * FIXME: Confirm bits and priorities are the right way around for the
  * following 4 pins
  */
 #define AF25 244
-SIG_EXPR_LIST_DECL_SEMG(AF25, I3C3SCL, I3C3, I3C3, SIG_DESC_SET(SCU438, 20),
-                       SIG_DESC_SET(SCU4D8, 20));
-SIG_EXPR_LIST_DECL_SESG(AF25, FSI1CLK, FSI1, SIG_DESC_CLEAR(SCU438, 20),
-                       SIG_DESC_SET(SCU4D8, 20));
+SIG_EXPR_LIST_DECL_SEMG(AF25, I3C3SCL, I3C3, I3C3, SIG_DESC_SET(SCU438, 20));
+SIG_EXPR_LIST_DECL_SESG(AF25, FSI1CLK, FSI1, SIG_DESC_SET(SCU4D8, 20));
 PIN_DECL_(AF25, SIG_EXPR_LIST_PTR(AF25, I3C3SCL),
          SIG_EXPR_LIST_PTR(AF25, FSI1CLK));
 
 #define AE26 245
-SIG_EXPR_LIST_DECL_SEMG(AE26, I3C3SDA, I3C3, I3C3, SIG_DESC_SET(SCU438, 21),
-                       SIG_DESC_SET(SCU4D8, 21));
-SIG_EXPR_LIST_DECL_SESG(AE26, FSI1DATA, FSI1, SIG_DESC_CLEAR(SCU438, 21),
-                       SIG_DESC_SET(SCU4D8, 21));
+SIG_EXPR_LIST_DECL_SEMG(AE26, I3C3SDA, I3C3, I3C3, SIG_DESC_SET(SCU438, 21));
+SIG_EXPR_LIST_DECL_SESG(AE26, FSI1DATA, FSI1, SIG_DESC_SET(SCU4D8, 21));
 PIN_DECL_(AE26, SIG_EXPR_LIST_PTR(AE26, I3C3SDA),
          SIG_EXPR_LIST_PTR(AE26, FSI1DATA));
 
@@ -1533,18 +1527,14 @@ FUNC_DECL_2(I3C3, HVI3C3, I3C3);
 FUNC_GROUP_DECL(FSI1, AF25, AE26);
 
 #define AE25 246
-SIG_EXPR_LIST_DECL_SEMG(AE25, I3C4SCL, I3C4, I3C4, SIG_DESC_SET(SCU438, 22),
-                       SIG_DESC_SET(SCU4D8, 22));
-SIG_EXPR_LIST_DECL_SESG(AE25, FSI2CLK, FSI2, SIG_DESC_CLEAR(SCU438, 22),
-                       SIG_DESC_SET(SCU4D8, 22));
+SIG_EXPR_LIST_DECL_SEMG(AE25, I3C4SCL, I3C4, I3C4, SIG_DESC_SET(SCU438, 22));
+SIG_EXPR_LIST_DECL_SESG(AE25, FSI2CLK, FSI2, SIG_DESC_SET(SCU4D8, 22));
 PIN_DECL_(AE25, SIG_EXPR_LIST_PTR(AE25, I3C4SCL),
          SIG_EXPR_LIST_PTR(AE25, FSI2CLK));
 
 #define AF24 247
-SIG_EXPR_LIST_DECL_SEMG(AF24, I3C4SDA, I3C4, I3C4, SIG_DESC_SET(SCU438, 23),
-                       SIG_DESC_SET(SCU4D8, 23));
-SIG_EXPR_LIST_DECL_SESG(AF24, FSI2DATA, FSI2, SIG_DESC_CLEAR(SCU438, 23),
-                       SIG_DESC_SET(SCU4D8, 23));
+SIG_EXPR_LIST_DECL_SEMG(AF24, I3C4SDA, I3C4, I3C4, SIG_DESC_SET(SCU438, 23));
+SIG_EXPR_LIST_DECL_SESG(AF24, FSI2DATA, FSI2, SIG_DESC_SET(SCU4D8, 23));
 PIN_DECL_(AF24, SIG_EXPR_LIST_PTR(AF24, I3C4SDA),
          SIG_EXPR_LIST_PTR(AF24, FSI2DATA));
 
@@ -1574,6 +1564,8 @@ static struct pinctrl_pin_desc aspeed_g6_pins[ASPEED_G6_NR_PINS] = {
        ASPEED_PINCTRL_PIN(A3),
        ASPEED_PINCTRL_PIN(AA11),
        ASPEED_PINCTRL_PIN(AA12),
+       ASPEED_PINCTRL_PIN(AA16),
+       ASPEED_PINCTRL_PIN(AA17),
        ASPEED_PINCTRL_PIN(AA23),
        ASPEED_PINCTRL_PIN(AA24),
        ASPEED_PINCTRL_PIN(AA25),
@@ -1585,6 +1577,8 @@ static struct pinctrl_pin_desc aspeed_g6_pins[ASPEED_G6_NR_PINS] = {
        ASPEED_PINCTRL_PIN(AB11),
        ASPEED_PINCTRL_PIN(AB12),
        ASPEED_PINCTRL_PIN(AB15),
+       ASPEED_PINCTRL_PIN(AB16),
+       ASPEED_PINCTRL_PIN(AB17),
        ASPEED_PINCTRL_PIN(AB18),
        ASPEED_PINCTRL_PIN(AB19),
        ASPEED_PINCTRL_PIN(AB22),
@@ -1602,6 +1596,7 @@ static struct pinctrl_pin_desc aspeed_g6_pins[ASPEED_G6_NR_PINS] = {
        ASPEED_PINCTRL_PIN(AC11),
        ASPEED_PINCTRL_PIN(AC12),
        ASPEED_PINCTRL_PIN(AC15),
+       ASPEED_PINCTRL_PIN(AC16),
        ASPEED_PINCTRL_PIN(AC17),
        ASPEED_PINCTRL_PIN(AC18),
        ASPEED_PINCTRL_PIN(AC19),
@@ -1619,6 +1614,7 @@ static struct pinctrl_pin_desc aspeed_g6_pins[ASPEED_G6_NR_PINS] = {
        ASPEED_PINCTRL_PIN(AD12),
        ASPEED_PINCTRL_PIN(AD14),
        ASPEED_PINCTRL_PIN(AD15),
+       ASPEED_PINCTRL_PIN(AD16),
        ASPEED_PINCTRL_PIN(AD19),
        ASPEED_PINCTRL_PIN(AD20),
        ASPEED_PINCTRL_PIN(AD22),
@@ -1634,8 +1630,11 @@ static struct pinctrl_pin_desc aspeed_g6_pins[ASPEED_G6_NR_PINS] = {
        ASPEED_PINCTRL_PIN(AE12),
        ASPEED_PINCTRL_PIN(AE14),
        ASPEED_PINCTRL_PIN(AE15),
+       ASPEED_PINCTRL_PIN(AE16),
        ASPEED_PINCTRL_PIN(AE18),
        ASPEED_PINCTRL_PIN(AE19),
+       ASPEED_PINCTRL_PIN(AE25),
+       ASPEED_PINCTRL_PIN(AE26),
        ASPEED_PINCTRL_PIN(AE7),
        ASPEED_PINCTRL_PIN(AE8),
        ASPEED_PINCTRL_PIN(AF10),
@@ -1643,6 +1642,8 @@ static struct pinctrl_pin_desc aspeed_g6_pins[ASPEED_G6_NR_PINS] = {
        ASPEED_PINCTRL_PIN(AF12),
        ASPEED_PINCTRL_PIN(AF14),
        ASPEED_PINCTRL_PIN(AF15),
+       ASPEED_PINCTRL_PIN(AF24),
+       ASPEED_PINCTRL_PIN(AF25),
        ASPEED_PINCTRL_PIN(AF7),
        ASPEED_PINCTRL_PIN(AF8),
        ASPEED_PINCTRL_PIN(AF9),
@@ -1792,17 +1793,6 @@ static struct pinctrl_pin_desc aspeed_g6_pins[ASPEED_G6_NR_PINS] = {
        ASPEED_PINCTRL_PIN(Y3),
        ASPEED_PINCTRL_PIN(Y4),
        ASPEED_PINCTRL_PIN(Y5),
-       ASPEED_PINCTRL_PIN(AB16),
-       ASPEED_PINCTRL_PIN(AA17),
-       ASPEED_PINCTRL_PIN(AB17),
-       ASPEED_PINCTRL_PIN(AE16),
-       ASPEED_PINCTRL_PIN(AC16),
-       ASPEED_PINCTRL_PIN(AA16),
-       ASPEED_PINCTRL_PIN(AD16),
-       ASPEED_PINCTRL_PIN(AF25),
-       ASPEED_PINCTRL_PIN(AE26),
-       ASPEED_PINCTRL_PIN(AE25),
-       ASPEED_PINCTRL_PIN(AF24),
 };
 
 static const struct aspeed_pin_group aspeed_g6_groups[] = {
@@ -1976,11 +1966,9 @@ static const struct aspeed_pin_group aspeed_g6_groups[] = {
        ASPEED_PINCTRL_GROUP(SALT9G1),
        ASPEED_PINCTRL_GROUP(SD1),
        ASPEED_PINCTRL_GROUP(SD2),
-       ASPEED_PINCTRL_GROUP(SD3),
-       ASPEED_PINCTRL_GROUP(SD3DAT4),
-       ASPEED_PINCTRL_GROUP(SD3DAT5),
-       ASPEED_PINCTRL_GROUP(SD3DAT6),
-       ASPEED_PINCTRL_GROUP(SD3DAT7),
+       ASPEED_PINCTRL_GROUP(EMMCG1),
+       ASPEED_PINCTRL_GROUP(EMMCG4),
+       ASPEED_PINCTRL_GROUP(EMMCG8),
        ASPEED_PINCTRL_GROUP(SGPM1),
        ASPEED_PINCTRL_GROUP(SGPS1),
        ASPEED_PINCTRL_GROUP(SIOONCTRL),
@@ -2059,6 +2047,7 @@ static const struct aspeed_pin_function aspeed_g6_functions[] = {
        ASPEED_PINCTRL_FUNC(ADC8),
        ASPEED_PINCTRL_FUNC(ADC9),
        ASPEED_PINCTRL_FUNC(BMCINT),
+       ASPEED_PINCTRL_FUNC(EMMC),
        ASPEED_PINCTRL_FUNC(ESPI),
        ASPEED_PINCTRL_FUNC(ESPIALT),
        ASPEED_PINCTRL_FUNC(FSI1),
@@ -2191,11 +2180,6 @@ static const struct aspeed_pin_function aspeed_g6_functions[] = {
        ASPEED_PINCTRL_FUNC(SALT9),
        ASPEED_PINCTRL_FUNC(SD1),
        ASPEED_PINCTRL_FUNC(SD2),
-       ASPEED_PINCTRL_FUNC(SD3),
-       ASPEED_PINCTRL_FUNC(SD3DAT4),
-       ASPEED_PINCTRL_FUNC(SD3DAT5),
-       ASPEED_PINCTRL_FUNC(SD3DAT6),
-       ASPEED_PINCTRL_FUNC(SD3DAT7),
        ASPEED_PINCTRL_FUNC(SGPM1),
        ASPEED_PINCTRL_FUNC(SGPS1),
        ASPEED_PINCTRL_FUNC(SIOONCTRL),
index a2c0d52e4f7b654cb360e91aa7227a8abaa295ac..140c5ce9fbc11a4614627d69b7bbdefb9c469b7e 100644 (file)
@@ -508,7 +508,7 @@ struct aspeed_pin_desc {
  * @idx: The bit index in the register
  */
 #define SIG_DESC_SET(reg, idx) SIG_DESC_IP_BIT(ASPEED_IP_SCU, reg, idx, 1)
-#define SIG_DESC_CLEAR(reg, idx) SIG_DESC_IP_BIT(ASPEED_IP_SCU, reg, idx, 0)
+#define SIG_DESC_CLEAR(reg, idx) { ASPEED_IP_SCU, reg, BIT_MASK(idx), 0, 0 }
 
 #define SIG_DESC_LIST_SYM(sig, group) sig_descs_ ## sig ## _ ## group
 #define SIG_DESC_LIST_DECL(sig, group, ...) \
@@ -738,6 +738,7 @@ struct aspeed_pin_desc {
        static const char *FUNC_SYM(func)[] = { __VA_ARGS__ }
 
 #define FUNC_DECL_2(func, one, two) FUNC_DECL_(func, #one, #two)
+#define FUNC_DECL_3(func, one, two, three) FUNC_DECL_(func, #one, #two, #three)
 
 #define FUNC_GROUP_DECL(func, ...) \
        GROUP_DECL(func, __VA_ARGS__); \
index 6f7d3a2f2e97d8943302e64089ce21717aebc98c..42f7ab383ad9f4dda27b51e5432bd64833abc6a2 100644 (file)
@@ -1,14 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (C) 2014-2017 Broadcom
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
  */
 
 /*
@@ -853,7 +845,7 @@ static int iproc_gpio_probe(struct platform_device *pdev)
 
        /* optional GPIO interrupt support */
        irq = platform_get_irq(pdev, 0);
-       if (irq) {
+       if (irq > 0) {
                struct irq_chip *irqc;
                struct gpio_irq_chip *girq;
 
index 2bf6af7df7d94f0b8e5c09fec4876d7818728673..9fabc451550ead855a0beed670522dcba03d290e 100644 (file)
@@ -640,8 +640,8 @@ static int ns2_pinmux_enable(struct pinctrl_dev *pctrl_dev,
        const struct ns2_pin_function *func;
        const struct ns2_pin_group *grp;
 
-       if (grp_select > pinctrl->num_groups ||
-               func_select > pinctrl->num_functions)
+       if (grp_select >= pinctrl->num_groups ||
+               func_select >= pinctrl->num_functions)
                return -EINVAL;
 
        func = &pinctrl->functions[func_select];
index 44f8ccdbeeff0a6677de61b729bacec41a606e89..9dfdc275ee33aef3adee096a67489a4bc71ebb67 100644 (file)
@@ -43,7 +43,7 @@ static const struct berlin_desc_group as370_soc_pinctrl_groups[] = {
                        BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO5 */
                        BERLIN_PINCTRL_FUNCTION(0x1, "i2s1"), /* DO3 */
                        BERLIN_PINCTRL_FUNCTION(0x2, "pwm"), /* PWM5 */
-                       BERLIN_PINCTRL_FUNCTION(0x3, "spififib"), /* SPDIFIB */
+                       BERLIN_PINCTRL_FUNCTION(0x3, "spdifib"), /* SPDIFIB */
                        BERLIN_PINCTRL_FUNCTION(0x4, "spdifo"), /* SPDIFO */
                        BERLIN_PINCTRL_FUNCTION(0x5, "phy")), /* DBG5 */
        BERLIN_PINCTRL_GROUP("I2S1_MCLK", 0x0, 0x3, 0x12,
index aae51c507f590e77c854a3547779995ea227e7e4..c6251eac8946c339af412763887b1ef93d537555 100644 (file)
@@ -1513,7 +1513,6 @@ static const struct dmi_system_id chv_no_valid_mask[] = {
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
                        DMI_MATCH(DMI_PRODUCT_FAMILY, "Intel_Strago"),
-                       DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
                },
        },
        {
@@ -1521,7 +1520,6 @@ static const struct dmi_system_id chv_no_valid_mask[] = {
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "HP"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "Setzer"),
-                       DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
                },
        },
        {
@@ -1529,7 +1527,6 @@ static const struct dmi_system_id chv_no_valid_mask[] = {
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "Cyan"),
-                       DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
                },
        },
        {
@@ -1537,7 +1534,6 @@ static const struct dmi_system_id chv_no_valid_mask[] = {
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "Celes"),
-                       DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
                },
        },
        {}
index 1f13bcd0e4e1447a63cea190dfca301929ea38af..bc013599a9a3840359d024e97281da069f69349a 100644 (file)
@@ -96,6 +96,7 @@ struct intel_pinctrl_context {
  * @pctldesc: Pin controller description
  * @pctldev: Pointer to the pin controller device
  * @chip: GPIO chip in this pin controller
+ * @irqchip: IRQ chip in this pin controller
  * @soc: SoC/PCH specific pin configuration data
  * @communities: All communities in this pin controller
  * @ncommunities: Number of communities in this pin controller
@@ -108,6 +109,7 @@ struct intel_pinctrl {
        struct pinctrl_desc pctldesc;
        struct pinctrl_dev *pctldev;
        struct gpio_chip chip;
+       struct irq_chip irqchip;
        const struct intel_pinctrl_soc_data *soc;
        struct intel_community *communities;
        size_t ncommunities;
@@ -1139,16 +1141,6 @@ static irqreturn_t intel_gpio_irq(int irq, void *data)
        return ret;
 }
 
-static struct irq_chip intel_gpio_irqchip = {
-       .name = "intel-gpio",
-       .irq_ack = intel_gpio_irq_ack,
-       .irq_mask = intel_gpio_irq_mask,
-       .irq_unmask = intel_gpio_irq_unmask,
-       .irq_set_type = intel_gpio_irq_type,
-       .irq_set_wake = intel_gpio_irq_wake,
-       .flags = IRQCHIP_MASK_ON_SUSPEND,
-};
-
 static int intel_gpio_add_pin_ranges(struct intel_pinctrl *pctrl,
                                     const struct intel_community *community)
 {
@@ -1198,12 +1190,22 @@ static int intel_gpio_probe(struct intel_pinctrl *pctrl, int irq)
 
        pctrl->chip = intel_gpio_chip;
 
+       /* Setup GPIO chip */
        pctrl->chip.ngpio = intel_gpio_ngpio(pctrl);
        pctrl->chip.label = dev_name(pctrl->dev);
        pctrl->chip.parent = pctrl->dev;
        pctrl->chip.base = -1;
        pctrl->irq = irq;
 
+       /* Setup IRQ chip */
+       pctrl->irqchip.name = dev_name(pctrl->dev);
+       pctrl->irqchip.irq_ack = intel_gpio_irq_ack;
+       pctrl->irqchip.irq_mask = intel_gpio_irq_mask;
+       pctrl->irqchip.irq_unmask = intel_gpio_irq_unmask;
+       pctrl->irqchip.irq_set_type = intel_gpio_irq_type;
+       pctrl->irqchip.irq_set_wake = intel_gpio_irq_wake;
+       pctrl->irqchip.flags = IRQCHIP_MASK_ON_SUSPEND;
+
        ret = devm_gpiochip_add_data(pctrl->dev, &pctrl->chip, pctrl);
        if (ret) {
                dev_err(pctrl->dev, "failed to register gpiochip\n");
@@ -1233,15 +1235,14 @@ static int intel_gpio_probe(struct intel_pinctrl *pctrl, int irq)
                return ret;
        }
 
-       ret = gpiochip_irqchip_add(&pctrl->chip, &intel_gpio_irqchip, 0,
+       ret = gpiochip_irqchip_add(&pctrl->chip, &pctrl->irqchip, 0,
                                   handle_bad_irq, IRQ_TYPE_NONE);
        if (ret) {
                dev_err(pctrl->dev, "failed to add irqchip\n");
                return ret;
        }
 
-       gpiochip_set_chained_irqchip(&pctrl->chip, &intel_gpio_irqchip, irq,
-                                    NULL);
+       gpiochip_set_chained_irqchip(&pctrl->chip, &pctrl->irqchip, irq, NULL);
        return 0;
 }
 
index 6462d3ca7ceb9b44b406015397102eadec5c2f99..f2f5fcd9a2374f1f1c8a403724503050fad5dc75 100644 (file)
@@ -183,10 +183,10 @@ static struct armada_37xx_pin_group armada_37xx_nb_groups[] = {
        PIN_GRP_EXTRA("uart2", 9, 2, BIT(1) | BIT(13) | BIT(14) | BIT(19),
                      BIT(1) | BIT(13) | BIT(14), BIT(1) | BIT(19),
                      18, 2, "gpio", "uart"),
-       PIN_GRP_GPIO("led0_od", 11, 1, BIT(20), "led"),
-       PIN_GRP_GPIO("led1_od", 12, 1, BIT(21), "led"),
-       PIN_GRP_GPIO("led2_od", 13, 1, BIT(22), "led"),
-       PIN_GRP_GPIO("led3_od", 14, 1, BIT(23), "led"),
+       PIN_GRP_GPIO_2("led0_od", 11, 1, BIT(20), BIT(20), 0, "led"),
+       PIN_GRP_GPIO_2("led1_od", 12, 1, BIT(21), BIT(21), 0, "led"),
+       PIN_GRP_GPIO_2("led2_od", 13, 1, BIT(22), BIT(22), 0, "led"),
+       PIN_GRP_GPIO_2("led3_od", 14, 1, BIT(23), BIT(23), 0, "led"),
 
 };
 
@@ -221,11 +221,11 @@ static const struct armada_37xx_pin_data armada_37xx_pin_sb = {
 };
 
 static inline void armada_37xx_update_reg(unsigned int *reg,
-                                         unsigned int offset)
+                                         unsigned int *offset)
 {
        /* We never have more than 2 registers */
-       if (offset >= GPIO_PER_REG) {
-               offset -= GPIO_PER_REG;
+       if (*offset >= GPIO_PER_REG) {
+               *offset -= GPIO_PER_REG;
                *reg += sizeof(u32);
        }
 }
@@ -376,7 +376,7 @@ static inline void armada_37xx_irq_update_reg(unsigned int *reg,
 {
        int offset = irqd_to_hwirq(d);
 
-       armada_37xx_update_reg(reg, offset);
+       armada_37xx_update_reg(reg, &offset);
 }
 
 static int armada_37xx_gpio_direction_input(struct gpio_chip *chip,
@@ -386,7 +386,7 @@ static int armada_37xx_gpio_direction_input(struct gpio_chip *chip,
        unsigned int reg = OUTPUT_EN;
        unsigned int mask;
 
-       armada_37xx_update_reg(&reg, offset);
+       armada_37xx_update_reg(&reg, &offset);
        mask = BIT(offset);
 
        return regmap_update_bits(info->regmap, reg, mask, 0);
@@ -399,7 +399,7 @@ static int armada_37xx_gpio_get_direction(struct gpio_chip *chip,
        unsigned int reg = OUTPUT_EN;
        unsigned int val, mask;
 
-       armada_37xx_update_reg(&reg, offset);
+       armada_37xx_update_reg(&reg, &offset);
        mask = BIT(offset);
        regmap_read(info->regmap, reg, &val);
 
@@ -413,7 +413,7 @@ static int armada_37xx_gpio_direction_output(struct gpio_chip *chip,
        unsigned int reg = OUTPUT_EN;
        unsigned int mask, val, ret;
 
-       armada_37xx_update_reg(&reg, offset);
+       armada_37xx_update_reg(&reg, &offset);
        mask = BIT(offset);
 
        ret = regmap_update_bits(info->regmap, reg, mask, mask);
@@ -434,7 +434,7 @@ static int armada_37xx_gpio_get(struct gpio_chip *chip, unsigned int offset)
        unsigned int reg = INPUT_VAL;
        unsigned int val, mask;
 
-       armada_37xx_update_reg(&reg, offset);
+       armada_37xx_update_reg(&reg, &offset);
        mask = BIT(offset);
 
        regmap_read(info->regmap, reg, &val);
@@ -449,7 +449,7 @@ static void armada_37xx_gpio_set(struct gpio_chip *chip, unsigned int offset,
        unsigned int reg = OUTPUT_VAL;
        unsigned int mask, val;
 
-       armada_37xx_update_reg(&reg, offset);
+       armada_37xx_update_reg(&reg, &offset);
        mask = BIT(offset);
        val = value ? mask : 0;
 
index 974973777395d13ef1409d2d721c84ec1fac99ba..564660028fcca66fab080d10033fab43bfb151b0 100644 (file)
@@ -705,7 +705,7 @@ static int stmfx_pinctrl_probe(struct platform_device *pdev)
 
 static int stmfx_pinctrl_remove(struct platform_device *pdev)
 {
-       struct stmfx *stmfx = dev_get_platdata(&pdev->dev);
+       struct stmfx *stmfx = dev_get_drvdata(pdev->dev.parent);
 
        return stmfx_function_disable(stmfx,
                                      STMFX_FUNC_GPIO |
index 86cc2cc68fb51b48d6f1308ff1dfff736e4eb2a5..af063f6908460f240e7221cd3076bd00815441a0 100644 (file)
@@ -420,12 +420,6 @@ failed_sensitivity:
 
 static int cmpc_accel_remove_v4(struct acpi_device *acpi)
 {
-       struct input_dev *inputdev;
-       struct cmpc_accel *accel;
-
-       inputdev = dev_get_drvdata(&acpi->dev);
-       accel = dev_get_drvdata(&inputdev->dev);
-
        device_remove_file(&acpi->dev, &cmpc_accel_sensitivity_attr_v4);
        device_remove_file(&acpi->dev, &cmpc_accel_g_select_attr_v4);
        return cmpc_remove_acpi_notify_device(acpi);
@@ -656,12 +650,6 @@ failed_file:
 
 static int cmpc_accel_remove(struct acpi_device *acpi)
 {
-       struct input_dev *inputdev;
-       struct cmpc_accel *accel;
-
-       inputdev = dev_get_drvdata(&acpi->dev);
-       accel = dev_get_drvdata(&inputdev->dev);
-
        device_remove_file(&acpi->dev, &cmpc_accel_sensitivity_attr);
        return cmpc_remove_acpi_notify_device(acpi);
 }
index ea68f6ed66ae396df2002a258493aa3eb31212e6..ffb8d5d1eb5ffe3b661250b4edc6bb0c37a68119 100644 (file)
@@ -108,6 +108,7 @@ static int i2c_multi_inst_probe(struct platform_device *pdev)
                        if (ret < 0) {
                                dev_dbg(dev, "Error requesting irq at index %d: %d\n",
                                        inst_data[i].irq_idx, ret);
+                               goto error;
                        }
                        board_info.irq = ret;
                        break;
index ab7ae19508676d60387e17c191b79ce3e1e34034..fa97834fdb78e54131a12abd856f92831027e64a 100644 (file)
@@ -293,9 +293,8 @@ static int intel_punit_ipc_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, punit_ipcdev);
 
-       irq = platform_get_irq(pdev, 0);
+       irq = platform_get_irq_optional(pdev, 0);
        if (irq < 0) {
-               punit_ipcdev->irq = 0;
                dev_warn(&pdev->dev, "Invalid IRQ, using polling mode\n");
        } else {
                ret = devm_request_irq(&pdev->dev, irq, intel_punit_ioc,
index 960961fb0d7c5c1b3bf91008bd2eae817a72787f..0517272a268edb82743993d49861e17a1fb30f79 100644 (file)
@@ -97,8 +97,8 @@ config PTP_1588_CLOCK_PCH
        help
          This driver adds support for using the PCH EG20T as a PTP
          clock. The hardware supports time stamping of PTP packets
-         when using the end-to-end delay (E2E) mechansim. The peer
-         delay mechansim (P2P) is not supported.
+         when using the end-to-end delay (E2E) mechanism. The peer
+         delay mechanism (P2P) is not supported.
 
          This clock is only useful if your PTP programs are getting
          hardware time stamps on the PTP Ethernet packets using the
index c61f00b72e156ad4da06c4093c19a248f810d570..a577218d1ab716fc9367e5e4a02289268a7f8ec7 100644 (file)
@@ -507,6 +507,8 @@ int ptp_qoriq_init(struct ptp_qoriq *ptp_qoriq, void __iomem *base,
                ptp_qoriq->regs.etts_regs = base + ETTS_REGS_OFFSET;
        }
 
+       spin_lock_init(&ptp_qoriq->lock);
+
        ktime_get_real_ts64(&now);
        ptp_qoriq_settime(&ptp_qoriq->caps, &now);
 
@@ -514,7 +516,6 @@ int ptp_qoriq_init(struct ptp_qoriq *ptp_qoriq, void __iomem *base,
          (ptp_qoriq->tclk_period & TCLK_PERIOD_MASK) << TCLK_PERIOD_SHIFT |
          (ptp_qoriq->cksel & CKSEL_MASK) << CKSEL_SHIFT;
 
-       spin_lock_init(&ptp_qoriq->lock);
        spin_lock_irqsave(&ptp_qoriq->lock, flags);
 
        regs = &ptp_qoriq->regs;
index 6ad51aa60c03dd1cf8afd00410a53a459f4d0a35..f877e77d9184be47710178e11ba8d30bf8522259 100644 (file)
@@ -472,14 +472,7 @@ int pwm_apply_state(struct pwm_device *pwm, const struct pwm_state *state)
                if (err)
                        return err;
 
-               /*
-                * .apply might have to round some values in *state, if possible
-                * read the actually implemented value back.
-                */
-               if (chip->ops->get_state)
-                       chip->ops->get_state(chip, pwm, &pwm->state);
-               else
-                       pwm->state = *state;
+               pwm->state = *state;
        } else {
                /*
                 * FIXME: restore the initial state in case of error.
index afe94470b67f7588a28930731d3a4ab6f4d2a331..a46be221dbdcba78160781d223c7ee6e2f2f5665 100644 (file)
@@ -5053,6 +5053,19 @@ regulator_register(const struct regulator_desc *regulator_desc,
 
        init_data = regulator_of_get_init_data(dev, regulator_desc, config,
                                               &rdev->dev.of_node);
+
+       /*
+        * Sometimes not all resources are probed already so we need to take
+        * that into account. This happens most of the time if the ena_gpiod comes
+        * from a gpio extender or something else.
+        */
+       if (PTR_ERR(init_data) == -EPROBE_DEFER) {
+               kfree(config);
+               kfree(rdev);
+               ret = -EPROBE_DEFER;
+               goto rinse;
+       }
+
        /*
         * We need to keep track of any GPIO descriptor coming from the
         * device tree until we have handled it over to the core. If the
index 56f3f72d77075e01e61beb9592acbbc221750fd7..710e67081d531e630d972feadfd08b177b6748bd 100644 (file)
@@ -136,7 +136,6 @@ static int da9062_buck_set_mode(struct regulator_dev *rdev, unsigned mode)
 static unsigned da9062_buck_get_mode(struct regulator_dev *rdev)
 {
        struct da9062_regulator *regl = rdev_get_drvdata(rdev);
-       struct regmap_field *field;
        unsigned int val, mode = 0;
        int ret;
 
@@ -158,18 +157,7 @@ static unsigned da9062_buck_get_mode(struct regulator_dev *rdev)
                return REGULATOR_MODE_NORMAL;
        }
 
-       /* Detect current regulator state */
-       ret = regmap_field_read(regl->suspend, &val);
-       if (ret < 0)
-               return 0;
-
-       /* Read regulator mode from proper register, depending on state */
-       if (val)
-               field = regl->suspend_sleep;
-       else
-               field = regl->sleep;
-
-       ret = regmap_field_read(field, &val);
+       ret = regmap_field_read(regl->sleep, &val);
        if (ret < 0)
                return 0;
 
@@ -208,21 +196,9 @@ static int da9062_ldo_set_mode(struct regulator_dev *rdev, unsigned mode)
 static unsigned da9062_ldo_get_mode(struct regulator_dev *rdev)
 {
        struct da9062_regulator *regl = rdev_get_drvdata(rdev);
-       struct regmap_field *field;
        int ret, val;
 
-       /* Detect current regulator state */
-       ret = regmap_field_read(regl->suspend, &val);
-       if (ret < 0)
-               return 0;
-
-       /* Read regulator mode from proper register, depending on state */
-       if (val)
-               field = regl->suspend_sleep;
-       else
-               field = regl->sleep;
-
-       ret = regmap_field_read(field, &val);
+       ret = regmap_field_read(regl->sleep, &val);
        if (ret < 0)
                return 0;
 
@@ -408,10 +384,10 @@ static const struct da9062_regulator_info local_da9061_regulator_info[] = {
                        __builtin_ffs((int)DA9062AA_BUCK1_MODE_MASK) - 1,
                        sizeof(unsigned int) * 8 -
                        __builtin_clz((DA9062AA_BUCK1_MODE_MASK)) - 1),
-               .suspend = REG_FIELD(DA9062AA_DVC_1,
-                       __builtin_ffs((int)DA9062AA_VBUCK1_SEL_MASK) - 1,
+               .suspend = REG_FIELD(DA9062AA_BUCK1_CONT,
+                       __builtin_ffs((int)DA9062AA_BUCK1_CONF_MASK) - 1,
                        sizeof(unsigned int) * 8 -
-                       __builtin_clz((DA9062AA_VBUCK1_SEL_MASK)) - 1),
+                       __builtin_clz(DA9062AA_BUCK1_CONF_MASK) - 1),
        },
        {
                .desc.id = DA9061_ID_BUCK2,
@@ -444,10 +420,10 @@ static const struct da9062_regulator_info local_da9061_regulator_info[] = {
                        __builtin_ffs((int)DA9062AA_BUCK3_MODE_MASK) - 1,
                        sizeof(unsigned int) * 8 -
                        __builtin_clz((DA9062AA_BUCK3_MODE_MASK)) - 1),
-               .suspend = REG_FIELD(DA9062AA_DVC_1,
-                       __builtin_ffs((int)DA9062AA_VBUCK3_SEL_MASK) - 1,
+               .suspend = REG_FIELD(DA9062AA_BUCK3_CONT,
+                       __builtin_ffs((int)DA9062AA_BUCK3_CONF_MASK) - 1,
                        sizeof(unsigned int) * 8 -
-                       __builtin_clz((DA9062AA_VBUCK3_SEL_MASK)) - 1),
+                       __builtin_clz(DA9062AA_BUCK3_CONF_MASK) - 1),
        },
        {
                .desc.id = DA9061_ID_BUCK3,
@@ -480,10 +456,10 @@ static const struct da9062_regulator_info local_da9061_regulator_info[] = {
                        __builtin_ffs((int)DA9062AA_BUCK4_MODE_MASK) - 1,
                        sizeof(unsigned int) * 8 -
                        __builtin_clz((DA9062AA_BUCK4_MODE_MASK)) - 1),
-               .suspend = REG_FIELD(DA9062AA_DVC_1,
-                       __builtin_ffs((int)DA9062AA_VBUCK4_SEL_MASK) - 1,
+               .suspend = REG_FIELD(DA9062AA_BUCK4_CONT,
+                       __builtin_ffs((int)DA9062AA_BUCK4_CONF_MASK) - 1,
                        sizeof(unsigned int) * 8 -
-                       __builtin_clz((DA9062AA_VBUCK4_SEL_MASK)) - 1),
+                       __builtin_clz(DA9062AA_BUCK4_CONF_MASK) - 1),
        },
        {
                .desc.id = DA9061_ID_LDO1,
@@ -509,10 +485,10 @@ static const struct da9062_regulator_info local_da9061_regulator_info[] = {
                        sizeof(unsigned int) * 8 -
                        __builtin_clz((DA9062AA_LDO1_SL_B_MASK)) - 1),
                .suspend_vsel_reg = DA9062AA_VLDO1_B,
-               .suspend = REG_FIELD(DA9062AA_DVC_1,
-                       __builtin_ffs((int)DA9062AA_VLDO1_SEL_MASK) - 1,
+               .suspend = REG_FIELD(DA9062AA_LDO1_CONT,
+                       __builtin_ffs((int)DA9062AA_LDO1_CONF_MASK) - 1,
                        sizeof(unsigned int) * 8 -
-                       __builtin_clz((DA9062AA_VLDO1_SEL_MASK)) - 1),
+                       __builtin_clz(DA9062AA_LDO1_CONF_MASK) - 1),
                .oc_event = REG_FIELD(DA9062AA_STATUS_D,
                        __builtin_ffs((int)DA9062AA_LDO1_ILIM_MASK) - 1,
                        sizeof(unsigned int) * 8 -
@@ -542,10 +518,10 @@ static const struct da9062_regulator_info local_da9061_regulator_info[] = {
                        sizeof(unsigned int) * 8 -
                        __builtin_clz((DA9062AA_LDO2_SL_B_MASK)) - 1),
                .suspend_vsel_reg = DA9062AA_VLDO2_B,
-               .suspend = REG_FIELD(DA9062AA_DVC_1,
-                       __builtin_ffs((int)DA9062AA_VLDO2_SEL_MASK) - 1,
+               .suspend = REG_FIELD(DA9062AA_LDO2_CONT,
+                       __builtin_ffs((int)DA9062AA_LDO2_CONF_MASK) - 1,
                        sizeof(unsigned int) * 8 -
-                       __builtin_clz((DA9062AA_VLDO2_SEL_MASK)) - 1),
+                       __builtin_clz(DA9062AA_LDO2_CONF_MASK) - 1),
                .oc_event = REG_FIELD(DA9062AA_STATUS_D,
                        __builtin_ffs((int)DA9062AA_LDO2_ILIM_MASK) - 1,
                        sizeof(unsigned int) * 8 -
@@ -575,10 +551,10 @@ static const struct da9062_regulator_info local_da9061_regulator_info[] = {
                        sizeof(unsigned int) * 8 -
                        __builtin_clz((DA9062AA_LDO3_SL_B_MASK)) - 1),
                .suspend_vsel_reg = DA9062AA_VLDO3_B,
-               .suspend = REG_FIELD(DA9062AA_DVC_1,
-                       __builtin_ffs((int)DA9062AA_VLDO3_SEL_MASK) - 1,
+               .suspend = REG_FIELD(DA9062AA_LDO3_CONT,
+                       __builtin_ffs((int)DA9062AA_LDO3_CONF_MASK) - 1,
                        sizeof(unsigned int) * 8 -
-                       __builtin_clz((DA9062AA_VLDO3_SEL_MASK)) - 1),
+                       __builtin_clz(DA9062AA_LDO3_CONF_MASK) - 1),
                .oc_event = REG_FIELD(DA9062AA_STATUS_D,
                        __builtin_ffs((int)DA9062AA_LDO3_ILIM_MASK) - 1,
                        sizeof(unsigned int) * 8 -
@@ -608,10 +584,10 @@ static const struct da9062_regulator_info local_da9061_regulator_info[] = {
                        sizeof(unsigned int) * 8 -
                        __builtin_clz((DA9062AA_LDO4_SL_B_MASK)) - 1),
                .suspend_vsel_reg = DA9062AA_VLDO4_B,
-               .suspend = REG_FIELD(DA9062AA_DVC_1,
-                       __builtin_ffs((int)DA9062AA_VLDO4_SEL_MASK) - 1,
+               .suspend = REG_FIELD(DA9062AA_LDO4_CONT,
+                       __builtin_ffs((int)DA9062AA_LDO4_CONF_MASK) - 1,
                        sizeof(unsigned int) * 8 -
-                       __builtin_clz((DA9062AA_VLDO4_SEL_MASK)) - 1),
+                       __builtin_clz(DA9062AA_LDO4_CONF_MASK) - 1),
                .oc_event = REG_FIELD(DA9062AA_STATUS_D,
                        __builtin_ffs((int)DA9062AA_LDO4_ILIM_MASK) - 1,
                        sizeof(unsigned int) * 8 -
@@ -652,10 +628,10 @@ static const struct da9062_regulator_info local_da9062_regulator_info[] = {
                        __builtin_ffs((int)DA9062AA_BUCK1_MODE_MASK) - 1,
                        sizeof(unsigned int) * 8 -
                        __builtin_clz((DA9062AA_BUCK1_MODE_MASK)) - 1),
-               .suspend = REG_FIELD(DA9062AA_DVC_1,
-                       __builtin_ffs((int)DA9062AA_VBUCK1_SEL_MASK) - 1,
+               .suspend = REG_FIELD(DA9062AA_BUCK1_CONT,
+                       __builtin_ffs((int)DA9062AA_BUCK1_CONF_MASK) - 1,
                        sizeof(unsigned int) * 8 -
-                       __builtin_clz((DA9062AA_VBUCK1_SEL_MASK)) - 1),
+                       __builtin_clz(DA9062AA_BUCK1_CONF_MASK) - 1),
        },
        {
                .desc.id = DA9062_ID_BUCK2,
@@ -688,10 +664,10 @@ static const struct da9062_regulator_info local_da9062_regulator_info[] = {
                        __builtin_ffs((int)DA9062AA_BUCK2_MODE_MASK) - 1,
                        sizeof(unsigned int) * 8 -
                        __builtin_clz((DA9062AA_BUCK2_MODE_MASK)) - 1),
-               .suspend = REG_FIELD(DA9062AA_DVC_1,
-                       __builtin_ffs((int)DA9062AA_VBUCK2_SEL_MASK) - 1,
+               .suspend = REG_FIELD(DA9062AA_BUCK2_CONT,
+                       __builtin_ffs((int)DA9062AA_BUCK2_CONF_MASK) - 1,
                        sizeof(unsigned int) * 8 -
-                       __builtin_clz((DA9062AA_VBUCK2_SEL_MASK)) - 1),
+                       __builtin_clz(DA9062AA_BUCK2_CONF_MASK) - 1),
        },
        {
                .desc.id = DA9062_ID_BUCK3,
@@ -724,10 +700,10 @@ static const struct da9062_regulator_info local_da9062_regulator_info[] = {
                        __builtin_ffs((int)DA9062AA_BUCK3_MODE_MASK) - 1,
                        sizeof(unsigned int) * 8 -
                        __builtin_clz((DA9062AA_BUCK3_MODE_MASK)) - 1),
-               .suspend = REG_FIELD(DA9062AA_DVC_1,
-                       __builtin_ffs((int)DA9062AA_VBUCK3_SEL_MASK) - 1,
+               .suspend = REG_FIELD(DA9062AA_BUCK3_CONT,
+                       __builtin_ffs((int)DA9062AA_BUCK3_CONF_MASK) - 1,
                        sizeof(unsigned int) * 8 -
-                       __builtin_clz((DA9062AA_VBUCK3_SEL_MASK)) - 1),
+                       __builtin_clz(DA9062AA_BUCK3_CONF_MASK) - 1),
        },
        {
                .desc.id = DA9062_ID_BUCK4,
@@ -760,10 +736,10 @@ static const struct da9062_regulator_info local_da9062_regulator_info[] = {
                        __builtin_ffs((int)DA9062AA_BUCK4_MODE_MASK) - 1,
                        sizeof(unsigned int) * 8 -
                        __builtin_clz((DA9062AA_BUCK4_MODE_MASK)) - 1),
-               .suspend = REG_FIELD(DA9062AA_DVC_1,
-                       __builtin_ffs((int)DA9062AA_VBUCK4_SEL_MASK) - 1,
+               .suspend = REG_FIELD(DA9062AA_BUCK4_CONT,
+                       __builtin_ffs((int)DA9062AA_BUCK4_CONF_MASK) - 1,
                        sizeof(unsigned int) * 8 -
-                       __builtin_clz((DA9062AA_VBUCK4_SEL_MASK)) - 1),
+                       __builtin_clz(DA9062AA_BUCK4_CONF_MASK) - 1),
        },
        {
                .desc.id = DA9062_ID_LDO1,
@@ -789,10 +765,10 @@ static const struct da9062_regulator_info local_da9062_regulator_info[] = {
                        sizeof(unsigned int) * 8 -
                        __builtin_clz((DA9062AA_LDO1_SL_B_MASK)) - 1),
                .suspend_vsel_reg = DA9062AA_VLDO1_B,
-               .suspend = REG_FIELD(DA9062AA_DVC_1,
-                       __builtin_ffs((int)DA9062AA_VLDO1_SEL_MASK) - 1,
+               .suspend = REG_FIELD(DA9062AA_LDO1_CONT,
+                       __builtin_ffs((int)DA9062AA_LDO1_CONF_MASK) - 1,
                        sizeof(unsigned int) * 8 -
-                       __builtin_clz((DA9062AA_VLDO1_SEL_MASK)) - 1),
+                       __builtin_clz(DA9062AA_LDO1_CONF_MASK) - 1),
                .oc_event = REG_FIELD(DA9062AA_STATUS_D,
                        __builtin_ffs((int)DA9062AA_LDO1_ILIM_MASK) - 1,
                        sizeof(unsigned int) * 8 -
@@ -822,10 +798,10 @@ static const struct da9062_regulator_info local_da9062_regulator_info[] = {
                        sizeof(unsigned int) * 8 -
                        __builtin_clz((DA9062AA_LDO2_SL_B_MASK)) - 1),
                .suspend_vsel_reg = DA9062AA_VLDO2_B,
-               .suspend = REG_FIELD(DA9062AA_DVC_1,
-                       __builtin_ffs((int)DA9062AA_VLDO2_SEL_MASK) - 1,
+               .suspend = REG_FIELD(DA9062AA_LDO2_CONT,
+                       __builtin_ffs((int)DA9062AA_LDO2_CONF_MASK) - 1,
                        sizeof(unsigned int) * 8 -
-                       __builtin_clz((DA9062AA_VLDO2_SEL_MASK)) - 1),
+                       __builtin_clz(DA9062AA_LDO2_CONF_MASK) - 1),
                .oc_event = REG_FIELD(DA9062AA_STATUS_D,
                        __builtin_ffs((int)DA9062AA_LDO2_ILIM_MASK) - 1,
                        sizeof(unsigned int) * 8 -
@@ -855,10 +831,10 @@ static const struct da9062_regulator_info local_da9062_regulator_info[] = {
                        sizeof(unsigned int) * 8 -
                        __builtin_clz((DA9062AA_LDO3_SL_B_MASK)) - 1),
                .suspend_vsel_reg = DA9062AA_VLDO3_B,
-               .suspend = REG_FIELD(DA9062AA_DVC_1,
-                       __builtin_ffs((int)DA9062AA_VLDO3_SEL_MASK) - 1,
+               .suspend = REG_FIELD(DA9062AA_LDO3_CONT,
+                       __builtin_ffs((int)DA9062AA_LDO3_CONF_MASK) - 1,
                        sizeof(unsigned int) * 8 -
-                       __builtin_clz((DA9062AA_VLDO3_SEL_MASK)) - 1),
+                       __builtin_clz(DA9062AA_LDO3_CONF_MASK) - 1),
                .oc_event = REG_FIELD(DA9062AA_STATUS_D,
                        __builtin_ffs((int)DA9062AA_LDO3_ILIM_MASK) - 1,
                        sizeof(unsigned int) * 8 -
@@ -888,10 +864,10 @@ static const struct da9062_regulator_info local_da9062_regulator_info[] = {
                        sizeof(unsigned int) * 8 -
                        __builtin_clz((DA9062AA_LDO4_SL_B_MASK)) - 1),
                .suspend_vsel_reg = DA9062AA_VLDO4_B,
-               .suspend = REG_FIELD(DA9062AA_DVC_1,
-                       __builtin_ffs((int)DA9062AA_VLDO4_SEL_MASK) - 1,
+               .suspend = REG_FIELD(DA9062AA_LDO4_CONT,
+                       __builtin_ffs((int)DA9062AA_LDO4_CONF_MASK) - 1,
                        sizeof(unsigned int) * 8 -
-                       __builtin_clz((DA9062AA_VLDO4_SEL_MASK)) - 1),
+                       __builtin_clz(DA9062AA_LDO4_CONF_MASK) - 1),
                .oc_event = REG_FIELD(DA9062AA_STATUS_D,
                        __builtin_ffs((int)DA9062AA_LDO4_ILIM_MASK) - 1,
                        sizeof(unsigned int) * 8 -
index d90a6fd8cbc7468f67324322443254388eff2d17..f81533070058e29db66991311ec4bbbec0c32636 100644 (file)
@@ -144,8 +144,7 @@ static int reg_fixed_voltage_probe(struct platform_device *pdev)
        struct device *dev = &pdev->dev;
        struct fixed_voltage_config *config;
        struct fixed_voltage_data *drvdata;
-       const struct fixed_dev_type *drvtype =
-               of_match_device(dev->driver->of_match_table, dev)->data;
+       const struct fixed_dev_type *drvtype = of_device_get_match_data(dev);
        struct regulator_config cfg = { };
        enum gpiod_flags gflags;
        int ret;
@@ -177,7 +176,7 @@ static int reg_fixed_voltage_probe(struct platform_device *pdev)
        drvdata->desc.type = REGULATOR_VOLTAGE;
        drvdata->desc.owner = THIS_MODULE;
 
-       if (drvtype->has_enable_clock) {
+       if (drvtype && drvtype->has_enable_clock) {
                drvdata->desc.ops = &fixed_voltage_clkenabled_ops;
 
                drvdata->enable_clock = devm_clk_get(dev, NULL);
index ff97cc50f2eb96161a44626062e318477d0187dc..9b05e03ba83056d7beedfb4199eed2febb2b1542 100644 (file)
@@ -210,6 +210,7 @@ static const struct regulator_desc lochnagar_regulators[] = {
 
                .enable_time = 3000,
                .ramp_delay = 1000,
+               .off_on_delay = 15000,
 
                .owner = THIS_MODULE,
        },
index afefb29ce1b0decf8f7ab2105be70ee9f5d59b7a..87637eb6bcbcb9ce2e50278f21bb5dfae315d004 100644 (file)
@@ -231,12 +231,12 @@ static int of_get_regulation_constraints(struct device *dev,
                                        "regulator-off-in-suspend"))
                        suspend_state->enabled = DISABLE_IN_SUSPEND;
 
-               if (!of_property_read_u32(np, "regulator-suspend-min-microvolt",
-                                         &pval))
+               if (!of_property_read_u32(suspend_np,
+                               "regulator-suspend-min-microvolt", &pval))
                        suspend_state->min_uV = pval;
 
-               if (!of_property_read_u32(np, "regulator-suspend-max-microvolt",
-                                         &pval))
+               if (!of_property_read_u32(suspend_np,
+                               "regulator-suspend-max-microvolt", &pval))
                        suspend_state->max_uV = pval;
 
                if (!of_property_read_u32(suspend_np,
@@ -445,11 +445,20 @@ struct regulator_init_data *regulator_of_get_init_data(struct device *dev,
                goto error;
        }
 
-       if (desc->of_parse_cb && desc->of_parse_cb(child, desc, config)) {
-               dev_err(dev,
-                       "driver callback failed to parse DT for regulator %pOFn\n",
-                       child);
-               goto error;
+       if (desc->of_parse_cb) {
+               int ret;
+
+               ret = desc->of_parse_cb(child, desc, config);
+               if (ret) {
+                       if (ret == -EPROBE_DEFER) {
+                               of_node_put(child);
+                               return ERR_PTR(-EPROBE_DEFER);
+                       }
+                       dev_err(dev,
+                               "driver callback failed to parse DT for regulator %pOFn\n",
+                               child);
+                       goto error;
+               }
        }
 
        *node = child;
index df5df1c495adb861322e356d59c091bbf94b342f..689537927f6f7d1bf4543d246490460971913117 100644 (file)
@@ -788,7 +788,13 @@ static int pfuze100_regulator_probe(struct i2c_client *client,
 
                /* SW2~SW4 high bit check and modify the voltage value table */
                if (i >= sw_check_start && i <= sw_check_end) {
-                       regmap_read(pfuze_chip->regmap, desc->vsel_reg, &val);
+                       ret = regmap_read(pfuze_chip->regmap,
+                                               desc->vsel_reg, &val);
+                       if (ret) {
+                               dev_err(&client->dev, "Fails to read from the register.\n");
+                               return ret;
+                       }
+
                        if (val & sw_hi) {
                                if (pfuze_chip->chip_id == PFUZE3000 ||
                                        pfuze_chip->chip_id == PFUZE3001) {
index db6c085da65ecf742daeca83824072ab70a3834b..0246b6f99fb50c4232449def2c005b615e31138d 100644 (file)
@@ -735,8 +735,8 @@ static const struct rpmh_vreg_hw_data pmic5_hfsmps515 = {
 static const struct rpmh_vreg_hw_data pmic5_bob = {
        .regulator_type = VRM,
        .ops = &rpmh_regulator_vrm_bypass_ops,
-       .voltage_range = REGULATOR_LINEAR_RANGE(300000, 0, 135, 32000),
-       .n_voltages = 136,
+       .voltage_range = REGULATOR_LINEAR_RANGE(3000000, 0, 31, 32000),
+       .n_voltages = 32,
        .pmic_mode_map = pmic_mode_map_pmic5_bob,
        .of_map_mode = rpmh_regulator_pmic4_bob_of_map_mode,
 };
index cced1ffb896c1169dba81bde5c9b83a63890ceab..89b9314d64c9dbe5fe152b06c252b1567600857c 100644 (file)
@@ -173,19 +173,14 @@ static int ti_abb_wait_txdone(struct device *dev, struct ti_abb *abb)
        while (timeout++ <= abb->settling_time) {
                status = ti_abb_check_txdone(abb);
                if (status)
-                       break;
+                       return 0;
 
                udelay(1);
        }
 
-       if (timeout > abb->settling_time) {
-               dev_warn_ratelimited(dev,
-                                    "%s:TRANXDONE timeout(%duS) int=0x%08x\n",
-                                    __func__, timeout, readl(abb->int_base));
-               return -ETIMEDOUT;
-       }
-
-       return 0;
+       dev_warn_ratelimited(dev, "%s:TRANXDONE timeout(%duS) int=0x%08x\n",
+                            __func__, timeout, readl(abb->int_base));
+       return -ETIMEDOUT;
 }
 
 /**
@@ -205,19 +200,14 @@ static int ti_abb_clear_all_txdone(struct device *dev, const struct ti_abb *abb)
 
                status = ti_abb_check_txdone(abb);
                if (!status)
-                       break;
+                       return 0;
 
                udelay(1);
        }
 
-       if (timeout > abb->settling_time) {
-               dev_warn_ratelimited(dev,
-                                    "%s:TRANXDONE timeout(%duS) int=0x%08x\n",
-                                    __func__, timeout, readl(abb->int_base));
-               return -ETIMEDOUT;
-       }
-
-       return 0;
+       dev_warn_ratelimited(dev, "%s:TRANXDONE timeout(%duS) int=0x%08x\n",
+                            __func__, timeout, readl(abb->int_base));
+       return -ETIMEDOUT;
 }
 
 /**
index fc53e1e221f0a021a28f0ee2a2f50319a7433af9..c94184d080f84d5bc4c0211159996fe43fbbbc68 100644 (file)
@@ -1553,8 +1553,8 @@ static int dasd_eckd_read_vol_info(struct dasd_device *device)
        if (rc == 0) {
                memcpy(&private->vsq, vsq, sizeof(*vsq));
        } else {
-               dev_warn(&device->cdev->dev,
-                        "Reading the volume storage information failed with rc=%d\n", rc);
+               DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
+                               "Reading the volume storage information failed with rc=%d", rc);
        }
 
        if (useglobal)
@@ -1737,8 +1737,8 @@ static int dasd_eckd_read_ext_pool_info(struct dasd_device *device)
        if (rc == 0) {
                dasd_eckd_cpy_ext_pool_data(device, lcq);
        } else {
-               dev_warn(&device->cdev->dev,
-                        "Reading the logical configuration failed with rc=%d\n", rc);
+               DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
+                               "Reading the logical configuration failed with rc=%d", rc);
        }
 
        dasd_sfree_request(cqr, cqr->memdev);
@@ -2020,14 +2020,10 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
        dasd_eckd_read_features(device);
 
        /* Read Volume Information */
-       rc = dasd_eckd_read_vol_info(device);
-       if (rc)
-               goto out_err3;
+       dasd_eckd_read_vol_info(device);
 
        /* Read Extent Pool Information */
-       rc = dasd_eckd_read_ext_pool_info(device);
-       if (rc)
-               goto out_err3;
+       dasd_eckd_read_ext_pool_info(device);
 
        /* Read Device Characteristics */
        rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
@@ -2059,9 +2055,6 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
        if (readonly)
                set_bit(DASD_FLAG_DEVICE_RO, &device->flags);
 
-       if (dasd_eckd_is_ese(device))
-               dasd_set_feature(device->cdev, DASD_FEATURE_DISCARD, 1);
-
        dev_info(&device->cdev->dev, "New DASD %04X/%02X (CU %04X/%02X) "
                 "with %d cylinders, %d heads, %d sectors%s\n",
                 private->rdc_data.dev_type,
@@ -3695,14 +3688,6 @@ static int dasd_eckd_release_space(struct dasd_device *device,
                return -EINVAL;
 }
 
-static struct dasd_ccw_req *
-dasd_eckd_build_cp_discard(struct dasd_device *device, struct dasd_block *block,
-                          struct request *req, sector_t first_trk,
-                          sector_t last_trk)
-{
-       return dasd_eckd_dso_ras(device, block, req, first_trk, last_trk, 1);
-}
-
 static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
                                               struct dasd_device *startdev,
                                               struct dasd_block *block,
@@ -4447,10 +4432,6 @@ static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev,
        cmdwtd = private->features.feature[12] & 0x40;
        use_prefix = private->features.feature[8] & 0x01;
 
-       if (req_op(req) == REQ_OP_DISCARD)
-               return dasd_eckd_build_cp_discard(startdev, block, req,
-                                                 first_trk, last_trk);
-
        cqr = NULL;
        if (cdlspecial || dasd_page_cache) {
                /* do nothing, just fall through to the cmd mode single case */
@@ -4729,14 +4710,12 @@ static struct dasd_ccw_req *dasd_eckd_build_alias_cp(struct dasd_device *base,
                                                     struct dasd_block *block,
                                                     struct request *req)
 {
-       struct dasd_device *startdev = NULL;
        struct dasd_eckd_private *private;
-       struct dasd_ccw_req *cqr;
+       struct dasd_device *startdev;
        unsigned long flags;
+       struct dasd_ccw_req *cqr;
 
-       /* Discard requests can only be processed on base devices */
-       if (req_op(req) != REQ_OP_DISCARD)
-               startdev = dasd_alias_get_start_dev(base);
+       startdev = dasd_alias_get_start_dev(base);
        if (!startdev)
                startdev = base;
        private = startdev->private;
@@ -5663,14 +5642,10 @@ static int dasd_eckd_restore_device(struct dasd_device *device)
        dasd_eckd_read_features(device);
 
        /* Read Volume Information */
-       rc = dasd_eckd_read_vol_info(device);
-       if (rc)
-               goto out_err2;
+       dasd_eckd_read_vol_info(device);
 
        /* Read Extent Pool Information */
-       rc = dasd_eckd_read_ext_pool_info(device);
-       if (rc)
-               goto out_err2;
+       dasd_eckd_read_ext_pool_info(device);
 
        /* Read Device Characteristics */
        rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
@@ -6521,20 +6496,8 @@ static void dasd_eckd_setup_blk_queue(struct dasd_block *block)
        unsigned int logical_block_size = block->bp_block;
        struct request_queue *q = block->request_queue;
        struct dasd_device *device = block->base;
-       struct dasd_eckd_private *private;
-       unsigned int max_discard_sectors;
-       unsigned int max_bytes;
-       unsigned int ext_bytes; /* Extent Size in Bytes */
-       int recs_per_trk;
-       int trks_per_cyl;
-       int ext_limit;
-       int ext_size; /* Extent Size in Cylinders */
        int max;
 
-       private = device->private;
-       trks_per_cyl = private->rdc_data.trk_per_cyl;
-       recs_per_trk = recs_per_track(&private->rdc_data, 0, logical_block_size);
-
        if (device->features & DASD_FEATURE_USERAW) {
                /*
                 * the max_blocks value for raw_track access is 256
@@ -6555,28 +6518,6 @@ static void dasd_eckd_setup_blk_queue(struct dasd_block *block)
        /* With page sized segments each segment can be translated into one idaw/tidaw */
        blk_queue_max_segment_size(q, PAGE_SIZE);
        blk_queue_segment_boundary(q, PAGE_SIZE - 1);
-
-       if (dasd_eckd_is_ese(device)) {
-               /*
-                * Depending on the extent size, up to UINT_MAX bytes can be
-                * accepted. However, neither DASD_ECKD_RAS_EXTS_MAX nor the
-                * device limits should be exceeded.
-                */
-               ext_size = dasd_eckd_ext_size(device);
-               ext_limit = min(private->real_cyl / ext_size, DASD_ECKD_RAS_EXTS_MAX);
-               ext_bytes = ext_size * trks_per_cyl * recs_per_trk *
-                       logical_block_size;
-               max_bytes = UINT_MAX - (UINT_MAX % ext_bytes);
-               if (max_bytes / ext_bytes > ext_limit)
-                       max_bytes = ext_bytes * ext_limit;
-
-               max_discard_sectors = max_bytes / 512;
-
-               blk_queue_max_discard_sectors(q, max_discard_sectors);
-               blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
-               q->limits.discard_granularity = ext_bytes;
-               q->limits.discard_alignment = ext_bytes;
-       }
 }
 
 static struct ccw_driver dasd_eckd_driver = {
index ba7d2480613b9a4055ec5d97ab86179835afb077..dcdaba689b20cf5592f8249a9cf163fd9f404962 100644 (file)
@@ -113,6 +113,7 @@ struct subchannel {
        enum sch_todo todo;
        struct work_struct todo_work;
        struct schib_config config;
+       u64 dma_mask;
        char *driver_override; /* Driver name to force a match */
 } __attribute__ ((aligned(8)));
 
index 1fbfb0a93f5f13f7bfae028b14e550fd6d01f182..831850435c23b37449a5eaf7f8fee1dfb94a7ea1 100644 (file)
@@ -232,7 +232,12 @@ struct subchannel *css_alloc_subchannel(struct subchannel_id schid,
         * belong to a subchannel need to fit 31 bit width (e.g. ccw).
         */
        sch->dev.coherent_dma_mask = DMA_BIT_MASK(31);
-       sch->dev.dma_mask = &sch->dev.coherent_dma_mask;
+       /*
+        * But no such restriction applies to the data that
+        * is handled by the streaming API.
+        */
+       sch->dma_mask = DMA_BIT_MASK(64);
+       sch->dev.dma_mask = &sch->dma_mask;
        return sch;
 
 err:
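
The hunk above works only because dev->dma_mask is a pointer: pointing it at sch->dma_mask (the field added to struct subchannel in the previous hunk) gives the streaming mask its own storage instead of aliasing coherent_dma_mask. A minimal sketch of the same split; the names my_dev and streaming_dma_mask are illustrative, not taken from the cio code:

    #include <linux/device.h>
    #include <linux/dma-mapping.h>
    #include <linux/types.h>

    struct my_dev {
            struct device dev;
            u64 streaming_dma_mask;         /* backing storage for dev.dma_mask */
    };

    static void my_dev_set_dma_masks(struct my_dev *mdev)
    {
            /* Coherent allocations must fit 31-bit addressing. */
            mdev->dev.coherent_dma_mask = DMA_BIT_MASK(31);

            /*
             * Streaming mappings have no such restriction, so point
             * dev.dma_mask at a separate, wider mask rather than at
             * coherent_dma_mask.
             */
            mdev->streaming_dma_mask = DMA_BIT_MASK(64);
            mdev->dev.dma_mask = &mdev->streaming_dma_mask;
    }
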
index 131430bd48d903d4f3e2b6c46e1de51b8afa253e..0c6245fc770694406f49d645adfc99f64983ef4d 100644 (file)
@@ -710,7 +710,7 @@ static struct ccw_device * io_subchannel_allocate_dev(struct subchannel *sch)
        if (!cdev->private)
                goto err_priv;
        cdev->dev.coherent_dma_mask = sch->dev.coherent_dma_mask;
-       cdev->dev.dma_mask = &cdev->dev.coherent_dma_mask;
+       cdev->dev.dma_mask = sch->dev.dma_mask;
        dma_pool = cio_gp_dma_create(&cdev->dev, 1);
        if (!dma_pool)
                goto err_dma_pool;
index f4ca1d29d61bad716fc655da27f7703fb6cdc0c7..cd164886132fdc048abe3fa36c2f0f6bb8dcba50 100644 (file)
@@ -113,7 +113,7 @@ static void set_impl_params(struct qdio_irq *irq_ptr,
        irq_ptr->qib.pfmt = qib_param_field_format;
        if (qib_param_field)
                memcpy(irq_ptr->qib.parm, qib_param_field,
-                      QDIO_MAX_BUFFERS_PER_Q);
+                      sizeof(irq_ptr->qib.parm));
 
        if (!input_slib_elements)
                goto output;
index 45bdb47f84c10b69572c77cbfa1b1f9ca16b1b5a..9157e728a362dbd5589e2fe786fbd585b5f3f8f5 100644 (file)
@@ -522,8 +522,7 @@ static int zcrypt_release(struct inode *inode, struct file *filp)
        if (filp->f_inode->i_cdev == &zcrypt_cdev) {
                struct zcdn_device *zcdndev;
 
-               if (mutex_lock_interruptible(&ap_perms_mutex))
-                       return -ERESTARTSYS;
+               mutex_lock(&ap_perms_mutex);
                zcdndev = find_zcdndev_by_devt(filp->f_inode->i_rdev);
                mutex_unlock(&ap_perms_mutex);
                if (zcdndev) {
index a7868c8133eebfe11fcd81a987c081e2f21f5e29..dda274351c21917018b009e38101a79c132de68d 100644 (file)
@@ -4715,8 +4715,7 @@ static int qeth_qdio_establish(struct qeth_card *card)
 
        QETH_CARD_TEXT(card, 2, "qdioest");
 
-       qib_param_field = kzalloc(QDIO_MAX_BUFFERS_PER_Q,
-                                 GFP_KERNEL);
+       qib_param_field = kzalloc(FIELD_SIZEOF(struct qib, parm), GFP_KERNEL);
        if (!qib_param_field) {
                rc =  -ENOMEM;
                goto out_free_nothing;
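
The qdio and qeth hunks above stop sizing the QIB parameter area with the QDIO_MAX_BUFFERS_PER_Q constant and instead use the size of the destination field itself. FIELD_SIZEOF() measures a struct member without needing an instance; a self-contained sketch of the idea, where qib_like and the member sizes are illustrative rather than the real struct qib layout:

    #include <stdio.h>
    #include <string.h>

    /* Same trick as the kernel macro: sizeof() of a member through a null
     * pointer is fine because sizeof() never evaluates its operand. */
    #define FIELD_SIZEOF(t, f) (sizeof(((t *)0)->f))

    struct qib_like {
            unsigned char other[64];
            unsigned char parm[128];        /* illustrative size only */
    };

    int main(void)
    {
            unsigned char src[256] = { 0 };
            struct qib_like qib;

            /* Copy exactly what the destination field can hold, no more. */
            memcpy(qib.parm, src, FIELD_SIZEOF(struct qib_like, parm));
            printf("parm holds %zu bytes\n", FIELD_SIZEOF(struct qib_like, parm));
            return 0;
    }
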
index b8799cd3e7aaa83196416b64a8343d019fac4642..bd8143e51747a785070c347b68f5d9314d2846f3 100644 (file)
@@ -2021,10 +2021,10 @@ static bool qeth_l2_vnicc_recover_char(struct qeth_card *card, u32 vnicc,
 static void qeth_l2_vnicc_init(struct qeth_card *card)
 {
        u32 *timeout = &card->options.vnicc.learning_timeout;
+       bool enable, error = false;
        unsigned int chars_len, i;
        unsigned long chars_tmp;
        u32 sup_cmds, vnicc;
-       bool enable, error;
 
        QETH_CARD_TEXT(card, 2, "vniccini");
        /* reset rx_bcast */
@@ -2045,17 +2045,24 @@ static void qeth_l2_vnicc_init(struct qeth_card *card)
        chars_len = sizeof(card->options.vnicc.sup_chars) * BITS_PER_BYTE;
        for_each_set_bit(i, &chars_tmp, chars_len) {
                vnicc = BIT(i);
-               qeth_l2_vnicc_query_cmds(card, vnicc, &sup_cmds);
-               if (!(sup_cmds & IPA_VNICC_SET_TIMEOUT) ||
-                   !(sup_cmds & IPA_VNICC_GET_TIMEOUT))
+               if (qeth_l2_vnicc_query_cmds(card, vnicc, &sup_cmds)) {
+                       sup_cmds = 0;
+                       error = true;
+               }
+               if ((sup_cmds & IPA_VNICC_SET_TIMEOUT) &&
+                   (sup_cmds & IPA_VNICC_GET_TIMEOUT))
+                       card->options.vnicc.getset_timeout_sup |= vnicc;
+               else
                        card->options.vnicc.getset_timeout_sup &= ~vnicc;
-               if (!(sup_cmds & IPA_VNICC_ENABLE) ||
-                   !(sup_cmds & IPA_VNICC_DISABLE))
+               if ((sup_cmds & IPA_VNICC_ENABLE) &&
+                   (sup_cmds & IPA_VNICC_DISABLE))
+                       card->options.vnicc.set_char_sup |= vnicc;
+               else
                        card->options.vnicc.set_char_sup &= ~vnicc;
        }
        /* enforce assumed default values and recover settings, if changed  */
-       error = qeth_l2_vnicc_recover_timeout(card, QETH_VNICC_LEARNING,
-                                             timeout);
+       error |= qeth_l2_vnicc_recover_timeout(card, QETH_VNICC_LEARNING,
+                                              timeout);
        chars_tmp = card->options.vnicc.wanted_chars ^ QETH_VNICC_DEFAULT;
        chars_tmp |= QETH_VNICC_BRIDGE_INVISIBLE;
        chars_len = sizeof(card->options.vnicc.wanted_chars) * BITS_PER_BYTE;
index 296bbc3c4606c178f7b1192edd99e70b9998acc2..cf63916814cca34305b1f28720161633581bb730 100644 (file)
 
 struct kmem_cache *zfcp_fsf_qtcb_cache;
 
+static bool ber_stop = true;
+module_param(ber_stop, bool, 0600);
+MODULE_PARM_DESC(ber_stop,
+                "Shuts down FCP devices for FCP channels that report a bit-error count in excess of their threshold (default on)");
+
 static void zfcp_fsf_request_timeout_handler(struct timer_list *t)
 {
        struct zfcp_fsf_req *fsf_req = from_timer(fsf_req, t, timer);
@@ -236,10 +241,15 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
        case FSF_STATUS_READ_SENSE_DATA_AVAIL:
                break;
        case FSF_STATUS_READ_BIT_ERROR_THRESHOLD:
-               dev_warn(&adapter->ccw_device->dev,
-                        "The error threshold for checksum statistics "
-                        "has been exceeded\n");
                zfcp_dbf_hba_bit_err("fssrh_3", req);
+               if (ber_stop) {
+                       dev_warn(&adapter->ccw_device->dev,
+                                "All paths over this FCP device are disused because of excessive bit errors\n");
+                       zfcp_erp_adapter_shutdown(adapter, 0, "fssrh_b");
+               } else {
+                       dev_warn(&adapter->ccw_device->dev,
+                                "The error threshold for checksum statistics has been exceeded\n");
+               }
                break;
        case FSF_STATUS_READ_LINK_DOWN:
                zfcp_fsf_status_read_link_down(req);
index 1b92f3c19ff32f84403f05cead17ae6a0423608f..90cf4691b8c3592c3f7d6dcda993b96da639ca81 100644 (file)
@@ -898,7 +898,7 @@ config SCSI_SNI_53C710
 
 config 53C700_LE_ON_BE
        bool
-       depends on SCSI_LASI700
+       depends on SCSI_LASI700 || SCSI_SNI_53C710
        default y
 
 config SCSI_STEX
index da00ca5fa5dc35bee76fea78d1c2eba8111396eb..401743e2b4294b2c6467f5a164449d454d3393da 100644 (file)
@@ -1923,6 +1923,7 @@ void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,
        struct fcoe_fcp_rsp_payload *fcp_rsp;
        struct bnx2fc_rport *tgt = io_req->tgt;
        struct scsi_cmnd *sc_cmd;
+       u16 scope = 0, qualifier = 0;
 
        /* scsi_cmd_cmpl is called with tgt lock held */
 
@@ -1990,12 +1991,30 @@ void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,
 
                        if (io_req->cdb_status == SAM_STAT_TASK_SET_FULL ||
                            io_req->cdb_status == SAM_STAT_BUSY) {
-                               /* Set the jiffies + retry_delay_timer * 100ms
-                                  for the rport/tgt */
-                               tgt->retry_delay_timestamp = jiffies +
-                                       fcp_rsp->retry_delay_timer * HZ / 10;
+                               /* Newer array firmware with BUSY or
+                                * TASK_SET_FULL may return a status that needs
+                                * the scope bits masked; otherwise a huge
+                                * delay of up to 27 minutes can result.
+                                */
+                               if (fcp_rsp->retry_delay_timer) {
+                                       /* Upper 2 bits */
+                                       scope = fcp_rsp->retry_delay_timer
+                                               & 0xC000;
+                                       /* Lower 14 bits */
+                                       qualifier = fcp_rsp->retry_delay_timer
+                                               & 0x3FFF;
+                               }
+                               if (scope > 0 && qualifier > 0 &&
+                                       qualifier <= 0x3FEF) {
+                                       /* Set the jiffies +
+                                        * retry_delay_timer * 100ms
+                                        * for the rport/tgt
+                                        */
+                                       tgt->retry_delay_timestamp = jiffies +
+                                               (qualifier * HZ / 10);
+                               }
                        }
-
                }
                if (io_req->fcp_resid)
                        scsi_set_resid(sc_cmd, io_req->fcp_resid);
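
The bnx2fc hunk above decodes the FCP_RSP retry delay field before trusting it: bits 15:14 carry a scope value and bits 13:0 a delay qualifier in 100 ms units, so treating the raw 16-bit value as a delay could park a target for up to roughly 27 minutes (0x3FFF * 100 ms). A standalone sketch of the same masking; the helper name and the HZ stand-in are illustrative:

    #include <stdint.h>

    #define SKETCH_HZ 100   /* stand-in for the kernel's HZ (jiffies per second) */

    /*
     * Split the 16-bit retry_delay_timer into scope (bits 15:14) and
     * qualifier (bits 13:0); return the delay in jiffies, or 0 when the
     * field should be ignored.
     */
    static unsigned long retry_delay_jiffies(uint16_t retry_delay_timer)
    {
            uint16_t scope = retry_delay_timer & 0xC000;      /* upper 2 bits */
            uint16_t qualifier = retry_delay_timer & 0x3FFF;  /* lower 14 bits */

            /* Honour only a bounded, non-zero qualifier when a scope is set. */
            if (scope > 0 && qualifier > 0 && qualifier <= 0x3FEF)
                    return (unsigned long)qualifier * SKETCH_HZ / 10;  /* 100 ms units */

            return 0;
    }
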
index 5f8153c37f77f3393edd428e3c4d2b901040887b..76751d6c7f0d4f13e1347e9562ca63c4e34484d1 100644 (file)
@@ -579,7 +579,6 @@ ch_release(struct inode *inode, struct file *file)
        scsi_changer *ch = file->private_data;
 
        scsi_device_put(ch->device);
-       ch->device = NULL;
        file->private_data = NULL;
        kref_put(&ch->ref, ch_destroy);
        return 0;
index 4971104b1817b5acc42fcd03e040ced92f63c179..f32da0ca529e004f448c6f9803b9c57b145a6609 100644 (file)
@@ -512,6 +512,7 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
        unsigned int tpg_desc_tbl_off;
        unsigned char orig_transition_tmo;
        unsigned long flags;
+       bool transitioning_sense = false;
 
        if (!pg->expiry) {
                unsigned long transition_tmo = ALUA_FAILOVER_TIMEOUT * HZ;
@@ -572,13 +573,19 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
                        goto retry;
                }
                /*
-                * Retry on ALUA state transition or if any
-                * UNIT ATTENTION occurred.
+                * If the array returns with 'ALUA state transition'
+                * sense code here it cannot return RTPG data during
+                * transition. So set the state to 'transitioning' directly.
                 */
                if (sense_hdr.sense_key == NOT_READY &&
-                   sense_hdr.asc == 0x04 && sense_hdr.ascq == 0x0a)
-                       err = SCSI_DH_RETRY;
-               else if (sense_hdr.sense_key == UNIT_ATTENTION)
+                   sense_hdr.asc == 0x04 && sense_hdr.ascq == 0x0a) {
+                       transitioning_sense = true;
+                       goto skip_rtpg;
+               }
+               /*
+                * Retry if any other UNIT ATTENTION occurred.
+                */
+               if (sense_hdr.sense_key == UNIT_ATTENTION)
                        err = SCSI_DH_RETRY;
                if (err == SCSI_DH_RETRY &&
                    pg->expiry != 0 && time_before(jiffies, pg->expiry)) {
@@ -666,7 +673,11 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
                off = 8 + (desc[7] * 4);
        }
 
+ skip_rtpg:
        spin_lock_irqsave(&pg->lock, flags);
+       if (transitioning_sense)
+               pg->state = SCSI_ACCESS_STATE_TRANSITIONING;
+
        sdev_printk(KERN_INFO, sdev,
                    "%s: port group %02x state %c %s supports %c%c%c%c%c%c%c\n",
                    ALUA_DH_NAME, pg->group_id, print_alua_state(pg->state),
index d1513fdf1e000546bddf27d2f75fbbf82f2a53a7..0847e682797be82133288c38fbb4d6f2630318d9 100644 (file)
@@ -3683,7 +3683,7 @@ void hisi_sas_debugfs_work_handler(struct work_struct *work)
 }
 EXPORT_SYMBOL_GPL(hisi_sas_debugfs_work_handler);
 
-void hisi_sas_debugfs_release(struct hisi_hba *hisi_hba)
+static void hisi_sas_debugfs_release(struct hisi_hba *hisi_hba)
 {
        struct device *dev = hisi_hba->dev;
        int i;
@@ -3705,7 +3705,7 @@ void hisi_sas_debugfs_release(struct hisi_hba *hisi_hba)
                devm_kfree(dev, hisi_hba->debugfs_port_reg[i]);
 }
 
-int hisi_sas_debugfs_alloc(struct hisi_hba *hisi_hba)
+static int hisi_sas_debugfs_alloc(struct hisi_hba *hisi_hba)
 {
        const struct hisi_sas_hw *hw = hisi_hba->hw;
        struct device *dev = hisi_hba->dev;
@@ -3796,7 +3796,7 @@ fail:
        return -ENOMEM;
 }
 
-void hisi_sas_debugfs_bist_init(struct hisi_hba *hisi_hba)
+static void hisi_sas_debugfs_bist_init(struct hisi_hba *hisi_hba)
 {
        hisi_hba->debugfs_bist_dentry =
                        debugfs_create_dir("bist", hisi_hba->debugfs_dir);
index ac39ed79ccaa15cebf9a3b7d86230130f4d5b076..216e557f703e6c0b466fc29dddc05364b3127bf2 100644 (file)
@@ -5477,6 +5477,8 @@ static int hpsa_ciss_submit(struct ctlr_info *h,
                return SCSI_MLQUEUE_HOST_BUSY;
        }
 
+       c->device = dev;
+
        enqueue_cmd_and_start_io(h, c);
        /* the cmd'll come back via intr handler in complete_scsi_command()  */
        return 0;
@@ -5548,6 +5550,7 @@ static int hpsa_ioaccel_submit(struct ctlr_info *h,
                hpsa_cmd_init(h, c->cmdindex, c);
                c->cmd_type = CMD_SCSI;
                c->scsi_cmd = cmd;
+               c->device = dev;
                rc = hpsa_scsi_ioaccel_raid_map(h, c);
                if (rc < 0)     /* scsi_dma_map failed. */
                        rc = SCSI_MLQUEUE_HOST_BUSY;
@@ -5555,6 +5558,7 @@ static int hpsa_ioaccel_submit(struct ctlr_info *h,
                hpsa_cmd_init(h, c->cmdindex, c);
                c->cmd_type = CMD_SCSI;
                c->scsi_cmd = cmd;
+               c->device = dev;
                rc = hpsa_scsi_ioaccel_direct_map(h, c);
                if (rc < 0)     /* scsi_dma_map failed. */
                        rc = SCSI_MLQUEUE_HOST_BUSY;
index e91377a4cafebad115a84465f02ebd59bc5140db..e8813d26e59415b1a584900c2ed7772438a74bc3 100644 (file)
@@ -9055,7 +9055,6 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
                }
        }
 
-#if defined(BUILD_NVME)
        /* Clear NVME stats */
        if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
                for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
@@ -9063,7 +9062,6 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
                               sizeof(phba->sli4_hba.hdwq[idx].nvme_cstat));
                }
        }
-#endif
 
        /* Clear SCSI stats */
        if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
index f4b879d25fe9d33229d40a5ab5675577b539c45a..fc6e4546d738a59ab1be40271f406c520437dca2 100644 (file)
@@ -851,9 +851,9 @@ lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 
        if (!(vport->fc_flag & FC_PT2PT)) {
                /* Check config parameter use-adisc or FCP-2 */
-               if ((vport->cfg_use_adisc && (vport->fc_flag & FC_RSCN_MODE)) ||
+               if (vport->cfg_use_adisc && ((vport->fc_flag & FC_RSCN_MODE) ||
                    ((ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) &&
-                    (ndlp->nlp_type & NLP_FCP_TARGET))) {
+                    (ndlp->nlp_type & NLP_FCP_TARGET)))) {
                        spin_lock_irq(shost->host_lock);
                        ndlp->nlp_flag |= NLP_NPR_ADISC;
                        spin_unlock_irq(shost->host_lock);
index fe1097666de4375e7dfe9a0febf4ddf7d9cff026..6822cd9ff8f1ef460dea74f002d52bb7ef043b28 100644 (file)
@@ -528,7 +528,6 @@ lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba,
                        list_del_init(&psb->list);
                        psb->exch_busy = 0;
                        psb->status = IOSTAT_SUCCESS;
-#ifdef BUILD_NVME
                        if (psb->cur_iocbq.iocb_flag == LPFC_IO_NVME) {
                                qp->abts_nvme_io_bufs--;
                                spin_unlock(&qp->abts_io_buf_list_lock);
@@ -536,7 +535,6 @@ lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba,
                                lpfc_sli4_nvme_xri_aborted(phba, axri, psb);
                                return;
                        }
-#endif
                        qp->abts_scsi_io_bufs--;
                        spin_unlock(&qp->abts_io_buf_list_lock);
 
index a0c6945b81392e1f07e02934f9702e0ac783197f..614f78dddafe0f0ae1e78b0f22beafc081b57993 100644 (file)
@@ -7866,7 +7866,7 @@ lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba)
        if (sli4_hba->hdwq) {
                for (eqidx = 0; eqidx < phba->cfg_irq_chann; eqidx++) {
                        eq = phba->sli4_hba.hba_eq_hdl[eqidx].eq;
-                       if (eq->queue_id == sli4_hba->mbx_cq->assoc_qid) {
+                       if (eq && eq->queue_id == sli4_hba->mbx_cq->assoc_qid) {
                                fpeq = eq;
                                break;
                        }
index 45a66048801be99b9d25204dd2a69c2f0821fb36..ff6d4aa924213074df0624a273e9e5ba3c1d9074 100644 (file)
@@ -4183,11 +4183,11 @@ megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
                 */
                if (pdev->subsystem_vendor == PCI_VENDOR_ID_COMPAQ &&
                    pdev->subsystem_device == 0xC000)
-                       return -ENODEV;
+                       goto out_disable_device;
                /* Now check the magic signature byte */
                pci_read_config_word(pdev, PCI_CONF_AMISIG, &magic);
                if (magic != HBA_SIGNATURE_471 && magic != HBA_SIGNATURE)
-                       return -ENODEV;
+                       goto out_disable_device;
                /* Ok it is probably a megaraid */
        }
 
index 1659d35cd37b5704436a29a1fb47fd02491712fe..59ca98f12afd5edf3f4faeb9995b3136b98f181a 100644 (file)
@@ -596,7 +596,7 @@ static void qedf_dcbx_handler(void *dev, struct qed_dcbx_get *get, u32 mib_type)
                tmp_prio = get->operational.app_prio.fcoe;
                if (qedf_default_prio > -1)
                        qedf->prio = qedf_default_prio;
-               else if (tmp_prio < 0 || tmp_prio > 7) {
+               else if (tmp_prio > 7) {
                        QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
                            "FIP/FCoE prio %d out of range, setting to %d.\n",
                            tmp_prio, QEDF_DEFAULT_PRIO);
index 8190c2a27584148dda0804623d6744f0ef00eb83..7259bce85e0e3e54be6ed45d8cd82593685c7390 100644 (file)
@@ -440,9 +440,6 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
                valid = 0;
                if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
                        valid = 1;
-               else if (start == (ha->flt_region_boot * 4) ||
-                   start == (ha->flt_region_fw * 4))
-                       valid = 1;
                else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha))
                        valid = 1;
                if (!valid) {
@@ -489,8 +486,10 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
                    "Writing flash region -- 0x%x/0x%x.\n",
                    ha->optrom_region_start, ha->optrom_region_size);
 
-               ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
+               rval = ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
                    ha->optrom_region_start, ha->optrom_region_size);
+               if (rval)
+                       rval = -EIO;
                break;
        default:
                rval = -EINVAL;
@@ -2920,6 +2919,8 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
        struct qla_hw_data *ha = vha->hw;
        uint16_t id = vha->vp_idx;
 
+       set_bit(VPORT_DELETE, &vha->dpc_flags);
+
        while (test_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags) ||
            test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags))
                msleep(1000);
index 28d587a89ba61efcab7a497e8c9319a7b9272359..99f0a1a08143e0864e0ba69718945fe538ffd447 100644 (file)
@@ -253,7 +253,7 @@ qla2x00_process_els(struct bsg_job *bsg_job)
        srb_t *sp;
        const char *type;
        int req_sg_cnt, rsp_sg_cnt;
-       int rval =  (DRIVER_ERROR << 16);
+       int rval =  (DID_ERROR << 16);
        uint16_t nextlid = 0;
 
        if (bsg_request->msgcode == FC_BSG_RPT_ELS) {
@@ -432,7 +432,7 @@ qla2x00_process_ct(struct bsg_job *bsg_job)
        struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
        scsi_qla_host_t *vha = shost_priv(host);
        struct qla_hw_data *ha = vha->hw;
-       int rval = (DRIVER_ERROR << 16);
+       int rval = (DID_ERROR << 16);
        int req_sg_cnt, rsp_sg_cnt;
        uint16_t loop_id;
        struct fc_port *fcport;
@@ -1950,7 +1950,7 @@ qlafx00_mgmt_cmd(struct bsg_job *bsg_job)
        struct Scsi_Host *host = fc_bsg_to_shost(bsg_job);
        scsi_qla_host_t *vha = shost_priv(host);
        struct qla_hw_data *ha = vha->hw;
-       int rval = (DRIVER_ERROR << 16);
+       int rval = (DID_ERROR << 16);
        struct qla_mt_iocb_rqst_fx00 *piocb_rqst;
        srb_t *sp;
        int req_sg_cnt = 0, rsp_sg_cnt = 0;
index 873a6aef1c5c0f34825bf023afa026e7fb6c7a4a..6ffa9877c28b46bbd371dd949835c06fae92bad3 100644 (file)
@@ -2396,6 +2396,7 @@ typedef struct fc_port {
        unsigned int query:1;
        unsigned int id_changed:1;
        unsigned int scan_needed:1;
+       unsigned int n2n_flag:1;
 
        struct completion nvme_del_done;
        uint32_t nvme_prli_service_param;
@@ -2446,7 +2447,6 @@ typedef struct fc_port {
        uint8_t fc4_type;
        uint8_t fc4f_nvme;
        uint8_t scan_state;
-       uint8_t n2n_flag;
 
        unsigned long last_queue_full;
        unsigned long last_ramp_up;
@@ -3036,6 +3036,7 @@ enum scan_flags_t {
 enum fc4type_t {
        FS_FC4TYPE_FCP  = BIT_0,
        FS_FC4TYPE_NVME = BIT_1,
+       FS_FCP_IS_N2N = BIT_7,
 };
 
 struct fab_scan_rp {
@@ -4394,6 +4395,7 @@ typedef struct scsi_qla_host {
 #define IOCB_WORK_ACTIVE       31
 #define SET_ZIO_THRESHOLD_NEEDED 32
 #define ISP_ABORT_TO_ROM       33
+#define VPORT_DELETE           34
 
        unsigned long   pci_flags;
 #define PFLG_DISCONNECTED      0       /* PCI device removed */
index dc0e3667631320a36a798fcd056e02c1ff5d8e45..5298ed10059f2f478e06b6b70c7a0f8f9d13452c 100644 (file)
@@ -3102,7 +3102,8 @@ int qla24xx_post_gpnid_work(struct scsi_qla_host *vha, port_id_t *id)
 {
        struct qla_work_evt *e;
 
-       if (test_bit(UNLOADING, &vha->dpc_flags))
+       if (test_bit(UNLOADING, &vha->dpc_flags) ||
+           (vha->vp_idx && test_bit(VPORT_DELETE, &vha->dpc_flags)))
                return 0;
 
        e = qla2x00_alloc_work(vha, QLA_EVT_GPNID);
index 643d2324082e9044f03246792113f65d5f3c7fa3..1d041313ec522bc659a154120cbe071979957b1a 100644 (file)
@@ -746,12 +746,15 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
                        break;
                default:
                        if ((id.b24 != fcport->d_id.b24 &&
-                           fcport->d_id.b24) ||
+                           fcport->d_id.b24 &&
+                           fcport->loop_id != FC_NO_LOOP_ID) ||
                            (fcport->loop_id != FC_NO_LOOP_ID &&
                                fcport->loop_id != loop_id)) {
                                ql_dbg(ql_dbg_disc, vha, 0x20e3,
                                    "%s %d %8phC post del sess\n",
                                    __func__, __LINE__, fcport->port_name);
+                               if (fcport->n2n_flag)
+                                       fcport->d_id.b24 = 0;
                                qlt_schedule_sess_for_deletion(fcport);
                                return;
                        }
@@ -759,6 +762,8 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
                }
 
                fcport->loop_id = loop_id;
+               if (fcport->n2n_flag)
+                       fcport->d_id.b24 = id.b24;
 
                wwn = wwn_to_u64(fcport->port_name);
                qlt_find_sess_invalidate_other(vha, wwn,
@@ -972,7 +977,7 @@ static void qla24xx_async_gnl_sp_done(srb_t *sp, int res)
                wwn = wwn_to_u64(e->port_name);
 
                ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x20e8,
-                   "%s %8phC %02x:%02x:%02x state %d/%d lid %x \n",
+                   "%s %8phC %02x:%02x:%02x CLS %x/%x lid %x \n",
                    __func__, (void *)&wwn, e->port_id[2], e->port_id[1],
                    e->port_id[0], e->current_login_state, e->last_login_state,
                    (loop_id & 0x7fff));
@@ -1499,7 +1504,8 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
             (fcport->fw_login_state == DSC_LS_PRLI_PEND)))
                return 0;
 
-       if (fcport->fw_login_state == DSC_LS_PLOGI_COMP) {
+       if (fcport->fw_login_state == DSC_LS_PLOGI_COMP &&
+           !N2N_TOPO(vha->hw)) {
                if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline)) {
                        set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
                        return 0;
@@ -1570,8 +1576,9 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport)
                                qla24xx_post_gpdb_work(vha, fcport, 0);
                        }  else {
                                ql_dbg(ql_dbg_disc, vha, 0x2118,
-                                   "%s %d %8phC post NVMe PRLI\n",
-                                   __func__, __LINE__, fcport->port_name);
+                                   "%s %d %8phC post %s PRLI\n",
+                                   __func__, __LINE__, fcport->port_name,
+                                   fcport->fc4f_nvme ? "NVME" : "FC");
                                qla24xx_post_prli_work(vha, fcport);
                        }
                        break;
@@ -1853,17 +1860,38 @@ qla24xx_handle_prli_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
                        break;
                }
 
-               if (ea->fcport->n2n_flag) {
+               if (ea->fcport->fc4f_nvme) {
                        ql_dbg(ql_dbg_disc, vha, 0x2118,
                                "%s %d %8phC post fc4 prli\n",
                                __func__, __LINE__, ea->fcport->port_name);
                        ea->fcport->fc4f_nvme = 0;
-                       ea->fcport->n2n_flag = 0;
                        qla24xx_post_prli_work(vha, ea->fcport);
+                       return;
+               }
+
+               /* at this point both PRLI NVME & PRLI FCP failed */
+               if (N2N_TOPO(vha->hw)) {
+                       if (ea->fcport->n2n_link_reset_cnt < 3) {
+                               ea->fcport->n2n_link_reset_cnt++;
+                               /*
+                                * remote port is not sending PLOGI. Reset the
+                                * link to kick-start its state machine
+                                */
+                               set_bit(N2N_LINK_RESET, &vha->dpc_flags);
+                       } else {
+                               ql_log(ql_log_warn, vha, 0x2119,
+                                   "%s %d %8phC Unable to reconnect\n",
+                                   __func__, __LINE__, ea->fcport->port_name);
+                       }
+               } else {
+                       /*
+                        * Switch connection, login failed. Take the connection
+                        * down and allow relogin to retrigger.
+                        */
+                       ea->fcport->flags &= ~FCF_ASYNC_SENT;
+                       ea->fcport->keep_nport_handle = 0;
+                       qlt_schedule_sess_for_deletion(ea->fcport);
                }
-               ql_dbg(ql_dbg_disc, vha, 0x2119,
-                   "%s %d %8phC unhandle event of %x\n",
-                   __func__, __LINE__, ea->fcport->port_name, ea->data[0]);
                break;
        }
 }
@@ -3190,7 +3218,7 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
 
                for (j = 0; j < 2; j++, fwdt++) {
                        if (!fwdt->template) {
-                               ql_log(ql_log_warn, vha, 0x00ba,
+                               ql_dbg(ql_dbg_init, vha, 0x00ba,
                                    "-> fwdt%u no template\n", j);
                                continue;
                        }
@@ -4986,28 +5014,47 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
        unsigned long flags;
 
        /* Initiate N2N login. */
-       if (test_and_clear_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags)) {
-               /* borrowing */
-               u32 *bp, i, sz;
-
-               memset(ha->init_cb, 0, ha->init_cb_size);
-               sz = min_t(int, sizeof(struct els_plogi_payload),
-                   ha->init_cb_size);
-               rval = qla24xx_get_port_login_templ(vha, ha->init_cb_dma,
-                   (void *)ha->init_cb, sz);
-               if (rval == QLA_SUCCESS) {
-                       bp = (uint32_t *)ha->init_cb;
-                       for (i = 0; i < sz/4 ; i++, bp++)
-                               *bp = cpu_to_be32(*bp);
+       if (N2N_TOPO(ha)) {
+               if (test_and_clear_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags)) {
+                       /* borrowing */
+                       u32 *bp, i, sz;
+
+                       memset(ha->init_cb, 0, ha->init_cb_size);
+                       sz = min_t(int, sizeof(struct els_plogi_payload),
+                           ha->init_cb_size);
+                       rval = qla24xx_get_port_login_templ(vha,
+                           ha->init_cb_dma, (void *)ha->init_cb, sz);
+                       if (rval == QLA_SUCCESS) {
+                               bp = (uint32_t *)ha->init_cb;
+                               for (i = 0; i < sz/4 ; i++, bp++)
+                                       *bp = cpu_to_be32(*bp);
 
-                       memcpy(&ha->plogi_els_payld.data, (void *)ha->init_cb,
-                           sizeof(ha->plogi_els_payld.data));
-                       set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
-               } else {
-                       ql_dbg(ql_dbg_init, vha, 0x00d1,
-                           "PLOGI ELS param read fail.\n");
+                               memcpy(&ha->plogi_els_payld.data,
+                                   (void *)ha->init_cb,
+                                   sizeof(ha->plogi_els_payld.data));
+                               set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+                       } else {
+                               ql_dbg(ql_dbg_init, vha, 0x00d1,
+                                   "PLOGI ELS param read fail.\n");
+                               goto skip_login;
+                       }
+               }
+
+               list_for_each_entry(fcport, &vha->vp_fcports, list) {
+                       if (fcport->n2n_flag) {
+                               qla24xx_fcport_handle_login(vha, fcport);
+                               return QLA_SUCCESS;
+                       }
+               }
+skip_login:
+               spin_lock_irqsave(&vha->work_lock, flags);
+               vha->scan.scan_retry++;
+               spin_unlock_irqrestore(&vha->work_lock, flags);
+
+               if (vha->scan.scan_retry < MAX_SCAN_RETRIES) {
+                       set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+                       set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
                }
-               return QLA_SUCCESS;
        }
 
        found_devs = 0;
index e92e52aa6e9b846229c1f403c56c02ea92a3eedb..518eb954cf42cf8374e47254b47c87801891b0ea 100644 (file)
@@ -2656,9 +2656,10 @@ qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
        els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
        els_iocb->port_id[1] = sp->fcport->d_id.b.area;
        els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
-       els_iocb->s_id[0] = vha->d_id.b.al_pa;
-       els_iocb->s_id[1] = vha->d_id.b.area;
-       els_iocb->s_id[2] = vha->d_id.b.domain;
+       /* For SID the byte order is different than DID */
+       els_iocb->s_id[1] = vha->d_id.b.al_pa;
+       els_iocb->s_id[2] = vha->d_id.b.area;
+       els_iocb->s_id[0] = vha->d_id.b.domain;
 
        if (elsio->u.els_logo.els_cmd == ELS_DCMD_PLOGI) {
                els_iocb->control_flags = 0;
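
The hunk above is purely about byte ordering: the destination ID is written to the IOCB as al_pa, area, domain, while the source ID field expects domain in byte 0 and al_pa, area in bytes 1 and 2. A small sketch that mirrors exactly the assignments shown above; the struct and helper names are illustrative, not the qla2xxx types:

    #include <stdint.h>

    struct fc_id {
            uint8_t domain;
            uint8_t area;
            uint8_t al_pa;
    };

    /* Destination ID (DID) layout used by the IOCB: al_pa, area, domain. */
    static void fill_did(uint8_t did[3], const struct fc_id *id)
    {
            did[0] = id->al_pa;
            did[1] = id->area;
            did[2] = id->domain;
    }

    /* Source ID (SID) uses a different order: domain, al_pa, area. */
    static void fill_sid(uint8_t sid[3], const struct fc_id *id)
    {
            sid[0] = id->domain;
            sid[1] = id->al_pa;
            sid[2] = id->area;
    }
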
index 4c26630c1c3eeee71a61d1080dddb3bf3474cef6..009fd5a33fcdecaf03d59ba57cf72cdfc6b84258 100644 (file)
@@ -2837,8 +2837,6 @@ qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
        if (sense_len == 0) {
                rsp->status_srb = NULL;
                sp->done(sp, cp->result);
-       } else {
-               WARN_ON_ONCE(true);
        }
 }
 
index 4c858e2d0ea8f5a3cddb8bd53890fdc88478ab9b..4a1f21c11758ea5b8ee0e2222bfb4f05fc49dc84 100644 (file)
@@ -702,6 +702,7 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
                mcp->mb[2] = LSW(risc_addr);
                mcp->mb[3] = 0;
                mcp->mb[4] = 0;
+               mcp->mb[11] = 0;
                ha->flags.using_lr_setting = 0;
                if (IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha) ||
                    IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
@@ -746,7 +747,7 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
                if (ha->flags.exchoffld_enabled)
                        mcp->mb[4] |= ENABLE_EXCHANGE_OFFLD;
 
-               mcp->out_mb |= MBX_4|MBX_3|MBX_2|MBX_1;
+               mcp->out_mb |= MBX_4 | MBX_3 | MBX_2 | MBX_1 | MBX_11;
                mcp->in_mb |= MBX_3 | MBX_2 | MBX_1;
        } else {
                mcp->mb[1] = LSW(risc_addr);
@@ -2249,7 +2250,7 @@ qla2x00_lip_reset(scsi_qla_host_t *vha)
        mbx_cmd_t mc;
        mbx_cmd_t *mcp = &mc;
 
-       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105a,
+       ql_dbg(ql_dbg_disc, vha, 0x105a,
            "Entered %s.\n", __func__);
 
        if (IS_CNA_CAPABLE(vha->hw)) {
@@ -3883,14 +3884,24 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
                case TOPO_N2N:
                        ha->current_topology = ISP_CFG_N;
                        spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
+                       list_for_each_entry(fcport, &vha->vp_fcports, list) {
+                               fcport->scan_state = QLA_FCPORT_SCAN;
+                               fcport->n2n_flag = 0;
+                       }
+
                        fcport = qla2x00_find_fcport_by_wwpn(vha,
                            rptid_entry->u.f1.port_name, 1);
                        spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
 
                        if (fcport) {
                                fcport->plogi_nack_done_deadline = jiffies + HZ;
-                               fcport->dm_login_expire = jiffies + 3*HZ;
+                               fcport->dm_login_expire = jiffies + 2*HZ;
                                fcport->scan_state = QLA_FCPORT_FOUND;
+                               fcport->n2n_flag = 1;
+                               fcport->keep_nport_handle = 1;
+                               if (vha->flags.nvme_enabled)
+                                       fcport->fc4f_nvme = 1;
+
                                switch (fcport->disc_state) {
                                case DSC_DELETED:
                                        set_bit(RELOGIN_NEEDED,
@@ -3924,7 +3935,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
                                    rptid_entry->u.f1.port_name,
                                    rptid_entry->u.f1.node_name,
                                    NULL,
-                                   FC4_TYPE_UNKNOWN);
+                                   FS_FCP_IS_N2N);
                        }
 
                        /* if our portname is higher then initiate N2N login */
@@ -4023,6 +4034,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
 
                list_for_each_entry(fcport, &vha->vp_fcports, list) {
                        fcport->scan_state = QLA_FCPORT_SCAN;
+                       fcport->n2n_flag = 0;
                }
 
                fcport = qla2x00_find_fcport_by_wwpn(vha,
@@ -4032,6 +4044,14 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
                        fcport->login_retry = vha->hw->login_retry_count;
                        fcport->plogi_nack_done_deadline = jiffies + HZ;
                        fcport->scan_state = QLA_FCPORT_FOUND;
+                       fcport->keep_nport_handle = 1;
+                       fcport->n2n_flag = 1;
+                       fcport->d_id.b.domain =
+                               rptid_entry->u.f2.remote_nport_id[2];
+                       fcport->d_id.b.area =
+                               rptid_entry->u.f2.remote_nport_id[1];
+                       fcport->d_id.b.al_pa =
+                               rptid_entry->u.f2.remote_nport_id[0];
                }
        }
 }
index 1a9a11ae728546cf20b5cdcdf4fdea6a9efbebe9..6afad68e5ba2152da404e32f404b2960e760c1b5 100644 (file)
@@ -66,6 +66,7 @@ qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
        uint16_t vp_id;
        struct qla_hw_data *ha = vha->hw;
        unsigned long flags = 0;
+       u8 i;
 
        mutex_lock(&ha->vport_lock);
        /*
@@ -75,8 +76,9 @@ qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
         * ensures no active vp_list traversal while the vport is removed
         * from the queue)
         */
-       wait_event_timeout(vha->vref_waitq, !atomic_read(&vha->vref_count),
-           10*HZ);
+       for (i = 0; i < 10 && atomic_read(&vha->vref_count); i++)
+               wait_event_timeout(vha->vref_waitq,
+                   atomic_read(&vha->vref_count), HZ);
 
        spin_lock_irqsave(&ha->vport_slock, flags);
        if (atomic_read(&vha->vref_count)) {
@@ -262,6 +264,9 @@ qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb)
        spin_lock_irqsave(&ha->vport_slock, flags);
        list_for_each_entry(vha, &ha->vp_list, list) {
                if (vha->vp_idx) {
+                       if (test_bit(VPORT_DELETE, &vha->dpc_flags))
+                               continue;
+
                        atomic_inc(&vha->vref_count);
                        spin_unlock_irqrestore(&ha->vport_slock, flags);
 
@@ -300,6 +305,20 @@ qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb)
 int
 qla2x00_vp_abort_isp(scsi_qla_host_t *vha)
 {
+       fc_port_t *fcport;
+
+       /*
+        * To exclusively reset vport, we need to log it out first.
+        * Note: This control_vp can fail if ISP reset is already
+        * issued; this is expected, as the vp would already be
+        * logged out due to ISP reset.
+        */
+       if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
+               qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);
+               list_for_each_entry(fcport, &vha->vp_fcports, list)
+                       fcport->logout_on_delete = 0;
+       }
+
        /*
         * Physical port will do most of the abort and recovery work. We can
         * just treat it as a loop down
@@ -312,16 +331,9 @@ qla2x00_vp_abort_isp(scsi_qla_host_t *vha)
                        atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
        }
 
-       /*
-        * To exclusively reset vport, we need to log it out first.  Note: this
-        * control_vp can fail if ISP reset is already issued, this is
-        * expected, as the vp would be already logged out due to ISP reset.
-        */
-       if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
-               qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);
-
        ql_dbg(ql_dbg_taskm, vha, 0x801d,
            "Scheduling enable of Vport %d.\n", vha->vp_idx);
+
        return qla24xx_enable_vp(vha);
 }
 
index 73db01e3b4e4a99e6e42983a89f1c2a8e6b58234..337162ac3a7716c46bc18774410f7a1796e2b33a 100644 (file)
@@ -1115,9 +1115,15 @@ static inline int test_fcport_count(scsi_qla_host_t *vha)
 void
 qla2x00_wait_for_sess_deletion(scsi_qla_host_t *vha)
 {
+       u8 i;
+
        qla2x00_mark_all_devices_lost(vha, 0);
 
-       wait_event_timeout(vha->fcport_waitQ, test_fcport_count(vha), 10*HZ);
+       for (i = 0; i < 10; i++)
+               wait_event_timeout(vha->fcport_waitQ, test_fcport_count(vha),
+                   HZ);
+
+       flush_workqueue(vha->hw->wq);
 }
 
 /*
@@ -3218,6 +3224,10 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
            req->req_q_in, req->req_q_out, rsp->rsp_q_in, rsp->rsp_q_out);
 
        ha->wq = alloc_workqueue("qla2xxx_wq", 0, 0);
+       if (unlikely(!ha->wq)) {
+               ret = -ENOMEM;
+               goto probe_failed;
+       }
 
        if (ha->isp_ops->initialize_adapter(base_vha)) {
                ql_log(ql_log_fatal, base_vha, 0x00d6,
@@ -3525,6 +3535,10 @@ qla2x00_shutdown(struct pci_dev *pdev)
                qla2x00_try_to_stop_firmware(vha);
        }
 
+       /* Disable timer */
+       if (vha->timer_active)
+               qla2x00_stop_timer(vha);
+
        /* Turn adapter off line */
        vha->flags.online = 0;
 
@@ -5036,6 +5050,10 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
 
                        memcpy(fcport->port_name, e->u.new_sess.port_name,
                            WWN_SIZE);
+
+                       if (e->u.new_sess.fc4_type & FS_FCP_IS_N2N)
+                               fcport->n2n_flag = 1;
+
                } else {
                        ql_dbg(ql_dbg_disc, vha, 0xffff,
                                   "%s %8phC mem alloc fail.\n",
@@ -5134,11 +5152,9 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
                        if (dfcp)
                                qlt_schedule_sess_for_deletion(tfcp);
 
-
-                       if (N2N_TOPO(vha->hw))
-                               fcport->flags &= ~FCF_FABRIC_DEVICE;
-
                        if (N2N_TOPO(vha->hw)) {
+                               fcport->flags &= ~FCF_FABRIC_DEVICE;
+                               fcport->keep_nport_handle = 1;
                                if (vha->flags.nvme_enabled) {
                                        fcport->fc4f_nvme = 1;
                                        fcport->n2n_flag = 1;
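
Two hunks in this series (qla2x00_wait_for_sess_deletion above, and the matching change in qla_mid.c earlier) replace one long wait_event_timeout() with up to ten one-second waits. A generic sketch of that chunked-wait pattern, using an illustrative reference count rather than the driver's fcport accounting:

    #include <linux/wait.h>
    #include <linux/jiffies.h>
    #include <linux/atomic.h>
    #include <linux/types.h>

    /*
     * Wait for a reference count to drain in one-second chunks, for at
     * most ten seconds overall. Each wait_event_timeout() either returns
     * early when the condition becomes true or gives up after HZ jiffies.
     */
    static void wait_for_drain(wait_queue_head_t *wq, atomic_t *refs)
    {
            u8 i;

            for (i = 0; i < 10 && atomic_read(refs); i++)
                    wait_event_timeout(*wq, !atomic_read(refs), HZ);
    }
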
index 0ffda617161483aaf6e83758ff4455c4dc5cc519..a06e56224a55984392dddc60fbf0377de158bea0 100644 (file)
@@ -953,7 +953,7 @@ void qlt_free_session_done(struct work_struct *work)
        struct qla_hw_data *ha = vha->hw;
        unsigned long flags;
        bool logout_started = false;
-       scsi_qla_host_t *base_vha;
+       scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
        struct qlt_plogi_ack_t *own =
                sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN];
 
@@ -1020,6 +1020,7 @@ void qlt_free_session_done(struct work_struct *work)
 
        if (logout_started) {
                bool traced = false;
+               u16 cnt = 0;
 
                while (!READ_ONCE(sess->logout_completed)) {
                        if (!traced) {
@@ -1029,6 +1030,9 @@ void qlt_free_session_done(struct work_struct *work)
                                traced = true;
                        }
                        msleep(100);
+                       cnt++;
+                       if (cnt > 200)
+                               break;
                }
 
                ql_dbg(ql_dbg_disc, vha, 0xf087,
@@ -1101,6 +1105,7 @@ void qlt_free_session_done(struct work_struct *work)
        }
 
        spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+       sess->free_pending = 0;
 
        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf001,
            "Unregistration of sess %p %8phC finished fcp_cnt %d\n",
@@ -1109,17 +1114,9 @@ void qlt_free_session_done(struct work_struct *work)
        if (tgt && (tgt->sess_count == 0))
                wake_up_all(&tgt->waitQ);
 
-       if (vha->fcport_count == 0)
-               wake_up_all(&vha->fcport_waitQ);
-
-       base_vha = pci_get_drvdata(ha->pdev);
-
-       sess->free_pending = 0;
-
-       if (test_bit(PFLG_DRIVER_REMOVING, &base_vha->pci_flags))
-               return;
-
-       if ((!tgt || !tgt->tgt_stop) && !LOOP_TRANSITION(vha)) {
+       if (!test_bit(PFLG_DRIVER_REMOVING, &base_vha->pci_flags) &&
+           !(vha->vp_idx && test_bit(VPORT_DELETE, &vha->dpc_flags)) &&
+           (!tgt || !tgt->tgt_stop) && !LOOP_TRANSITION(vha)) {
                switch (vha->host->active_mode) {
                case MODE_INITIATOR:
                case MODE_DUAL:
@@ -1132,6 +1129,9 @@ void qlt_free_session_done(struct work_struct *work)
                        break;
                }
        }
+
+       if (vha->fcport_count == 0)
+               wake_up_all(&vha->fcport_waitQ);
 }
 
 /* ha->tgt.sess_lock supposed to be held on entry */
@@ -1161,7 +1161,7 @@ void qlt_unreg_sess(struct fc_port *sess)
        sess->last_login_gen = sess->login_gen;
 
        INIT_WORK(&sess->free_work, qlt_free_session_done);
-       schedule_work(&sess->free_work);
+       queue_work(sess->vha->hw->wq, &sess->free_work);
 }
 EXPORT_SYMBOL(qlt_unreg_sess);
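
The qlt_free_session_done hunk above bounds a previously unbounded logout wait: poll every 100 ms, but give up after 200 iterations, roughly 20 seconds. A minimal sketch of that bounded-polling pattern; the flag pointer is illustrative, not the driver's session field:

    #include <linux/delay.h>
    #include <linux/compiler.h>
    #include <linux/types.h>

    /* Poll a completion flag every 100 ms, but never for more than ~20 s. */
    static void wait_for_logout(const bool *logout_completed)
    {
            u16 cnt = 0;

            while (!READ_ONCE(*logout_completed)) {
                    msleep(100);
                    if (++cnt > 200)
                            break;  /* ~20 seconds elapsed; stop waiting */
            }
    }
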
 
index 1c470e31ae815feb34ffad7b71a675aa4ce212f3..ae2fa170f6ad3f2267efd4ac6b8fd18eae8a3514 100644 (file)
@@ -967,6 +967,7 @@ void scsi_eh_prep_cmnd(struct scsi_cmnd *scmd, struct scsi_eh_save *ses,
        ses->data_direction = scmd->sc_data_direction;
        ses->sdb = scmd->sdb;
        ses->result = scmd->result;
+       ses->resid_len = scmd->req.resid_len;
        ses->underflow = scmd->underflow;
        ses->prot_op = scmd->prot_op;
        ses->eh_eflags = scmd->eh_eflags;
@@ -977,6 +978,7 @@ void scsi_eh_prep_cmnd(struct scsi_cmnd *scmd, struct scsi_eh_save *ses,
        memset(scmd->cmnd, 0, BLK_MAX_CDB);
        memset(&scmd->sdb, 0, sizeof(scmd->sdb));
        scmd->result = 0;
+       scmd->req.resid_len = 0;
 
        if (sense_bytes) {
                scmd->sdb.length = min_t(unsigned, SCSI_SENSE_BUFFERSIZE,
@@ -1029,6 +1031,7 @@ void scsi_eh_restore_cmnd(struct scsi_cmnd* scmd, struct scsi_eh_save *ses)
        scmd->sc_data_direction = ses->data_direction;
        scmd->sdb = ses->sdb;
        scmd->result = ses->result;
+       scmd->req.resid_len = ses->resid_len;
        scmd->underflow = ses->underflow;
        scmd->prot_op = ses->prot_op;
        scmd->eh_eflags = ses->eh_eflags;
index dc210b9d4896b738ee2a3e2b78fde8c89910004b..5447738906ac04257fe6b1d92ee1460b75c825e3 100644 (file)
@@ -1834,6 +1834,7 @@ static const struct blk_mq_ops scsi_mq_ops_no_commit = {
        .init_request   = scsi_mq_init_request,
        .exit_request   = scsi_mq_exit_request,
        .initialize_rq_fn = scsi_initialize_rq,
+       .cleanup_rq     = scsi_cleanup_rq,
        .busy           = scsi_mq_lld_busy,
        .map_queues     = scsi_map_queues,
 };
@@ -1921,7 +1922,8 @@ struct scsi_device *scsi_device_from_queue(struct request_queue *q)
 {
        struct scsi_device *sdev = NULL;
 
-       if (q->mq_ops == &scsi_mq_ops)
+       if (q->mq_ops == &scsi_mq_ops_no_commit ||
+           q->mq_ops == &scsi_mq_ops)
                sdev = q->queuedata;
        if (!sdev || !get_device(&sdev->sdev_gendev))
                sdev = NULL;
index 64c96c7828ee1056042da4a913d9f3f2b9506506..6d7362e7367edc58a757cd56d3b13c5b644883b3 100644 (file)
@@ -730,6 +730,14 @@ sdev_store_delete(struct device *dev, struct device_attribute *attr,
                  const char *buf, size_t count)
 {
        struct kernfs_node *kn;
+       struct scsi_device *sdev = to_scsi_device(dev);
+
+       /*
+        * We need to try to get the module, to avoid the module being
+        * removed during the delete.
+        */
+       if (scsi_device_get(sdev))
+               return -ENODEV;
 
        kn = sysfs_break_active_protection(&dev->kobj, &attr->attr);
        WARN_ON_ONCE(!kn);
@@ -744,9 +752,10 @@ sdev_store_delete(struct device *dev, struct device_attribute *attr,
         * state into SDEV_DEL.
         */
        device_remove_file(dev, attr);
-       scsi_remove_device(to_scsi_device(dev));
+       scsi_remove_device(sdev);
        if (kn)
                sysfs_unbreak_active_protection(kn);
+       scsi_device_put(sdev);
        return count;
 };
 static DEVICE_ATTR(delete, S_IWUSR, NULL, sdev_store_delete);
index 50928bc266eb24d5995affc32f7de12ec1ca49b2..ebb40160539f52e1c6ee65802fb9dd731868ed89 100644 (file)
@@ -1166,11 +1166,12 @@ static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *cmd)
        sector_t lba = sectors_to_logical(sdp, blk_rq_pos(rq));
        sector_t threshold;
        unsigned int nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
-       bool dif, dix;
        unsigned int mask = logical_to_sectors(sdp, 1) - 1;
        bool write = rq_data_dir(rq) == WRITE;
        unsigned char protect, fua;
        blk_status_t ret;
+       unsigned int dif;
+       bool dix;
 
        ret = scsi_init_io(cmd);
        if (ret != BLK_STS_OK)
@@ -1654,7 +1655,8 @@ static int sd_sync_cache(struct scsi_disk *sdkp, struct scsi_sense_hdr *sshdr)
                /* we need to evaluate the error return  */
                if (scsi_sense_valid(sshdr) &&
                        (sshdr->asc == 0x3a ||  /* medium not present */
-                        sshdr->asc == 0x20))   /* invalid command */
+                        sshdr->asc == 0x20 ||  /* invalid command */
+                        (sshdr->asc == 0x74 && sshdr->ascq == 0x71)))  /* drive is password locked */
                                /* this is no error here */
                                return 0;
 
index aef4881d8e21588f7698aeccb982ae416e6972ff..a85d52b5dc32094d04219b548619c2b7ea1de23f 100644 (file)
@@ -66,10 +66,8 @@ static int snirm710_probe(struct platform_device *dev)
 
        base = res->start;
        hostdata = kzalloc(sizeof(*hostdata), GFP_KERNEL);
-       if (!hostdata) {
-               dev_printk(KERN_ERR, dev, "Failed to allocate host data\n");
+       if (!hostdata)
                return -ENOMEM;
-       }
 
        hostdata->dev = &dev->dev;
        dma_set_mask(&dev->dev, DMA_BIT_MASK(32));
index ed8b9ac805e6211ecb2c80570fb21e9531ece438..542d2bac2922c90b0f38ac637edbe60b04df78fc 100644 (file)
@@ -1837,8 +1837,7 @@ static int storvsc_probe(struct hv_device *device,
        /*
         * Set the number of HW queues we are supporting.
         */
-       if (stor_device->num_sc != 0)
-               host->nr_hw_queues = stor_device->num_sc + 1;
+       host->nr_hw_queues = num_present_cpus();
 
        /*
         * Set the error handler work queue.
index a9344eb4e047f1e73c28212024c1e8dcefb19228..dc2f6d2b46edc306f8696cd4471358a86dd6df49 100644 (file)
@@ -98,6 +98,8 @@ static int ufs_bsg_request(struct bsg_job *job)
 
        bsg_reply->reply_payload_rcv_len = 0;
 
+       pm_runtime_get_sync(hba->dev);
+
        msgcode = bsg_request->msgcode;
        switch (msgcode) {
        case UPIU_TRANSACTION_QUERY_REQ:
@@ -135,6 +137,8 @@ static int ufs_bsg_request(struct bsg_job *job)
                break;
        }
 
+       pm_runtime_put_sync(hba->dev);
+
        if (!desc_buff)
                goto out;
 
index 034dd9cb9ec8ccd9cd12738170bda35d2de3a58e..11a87f51c442a5b5acfc872771c58ad10709fa6b 100644 (file)
@@ -8143,6 +8143,9 @@ int ufshcd_shutdown(struct ufs_hba *hba)
 {
        int ret = 0;
 
+       if (!hba->is_powered)
+               goto out;
+
        if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
                goto out;
 
index 50831ebf126afcdc2ea26e5c2d1db98e05cd0afa..c68882eb80f778e93bf7b1e29cc18b8e06f44d7a 100644 (file)
@@ -46,7 +46,7 @@ static ssize_t soc_uid_show(struct device *dev,
        hdr->func = IMX_SC_MISC_FUNC_UNIQUE_ID;
        hdr->size = 1;
 
-       ret = imx_scu_call_rpc(soc_ipc_handle, &msg, false);
+       ret = imx_scu_call_rpc(soc_ipc_handle, &msg, true);
        if (ret) {
                pr_err("%s: get soc uid failed, ret %d\n", __func__, ret);
                return ret;
index 290dbfc7ace1a03d76ac866117c578b90d5261a7..ce32dfe33becdd7f2ab890d9814de09255ff1216 100644 (file)
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
 config EXFAT_FS
        tristate "exFAT fs support"
        depends on BLOCK
@@ -6,7 +7,7 @@ config EXFAT_FS
          This adds support for the exFAT file system.
 
 config EXFAT_DONT_MOUNT_VFAT
-       bool "Prohibit mounting of fat/vfat filesysems by exFAT"
+       bool "Prohibit mounting of fat/vfat filesystems by exFAT"
        depends on EXFAT_FS
        default y
        help
index 84944dfbae28d4f61c7478f19bc82d9466196e6f..6c90aec83febfd538f69d66d8cd77a0ab120f011 100644 (file)
@@ -1,4 +1,4 @@
-# SPDX-License-Identifier: GPL-2.0
+# SPDX-License-Identifier: GPL-2.0-or-later
 
 obj-$(CONFIG_EXFAT_FS) += exfat.o
 
index 6c12f2d79f4d310c0e09a386e7f0fa9423d9e0a0..3abab33e932c0c48b08d9f27f1f7a780c2457faf 100644 (file)
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0-or-later */
 /*
  *  Copyright (C) 2012-2013 Samsung Electronics Co., Ltd.
  */
index f086c75e7076fb1c1c0a65dad778d558a67368ba..81d20e6241c65619dda4900ac7a9f3ce5ff6bdf1 100644 (file)
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  *  Copyright (C) 2012-2013 Samsung Electronics Co., Ltd.
  */
index 1565ce65d39f42bbedd341cc184c2ba2cd8828ef..e1b001718709088d62fcaaaad78c17ca214bd690 100644 (file)
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  *  Copyright (C) 2012-2013 Samsung Electronics Co., Ltd.
  */
index b3e9cf725cf57c28579599bea45e75909fba8661..79174e5c4145f397f18e781b51fe6378727859a3 100644 (file)
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  *  Copyright (C) 2012-2013 Samsung Electronics Co., Ltd.
  */
index 03cb8290b5d261d5c8df5a1a61c511f3f20df905..a5c4b68925fba64299c9f7d6455d951cb1beca1a 100644 (file)
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  *  Copyright (C) 2012-2013 Samsung Electronics Co., Ltd.
  */
index 5f6caee819a6b26d2397e54645fa16144b749f26..3b2b0ceb7297a0bcd932306d64f2189de486eaef 100644 (file)
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  *  Copyright (C) 2012-2013 Samsung Electronics Co., Ltd.
  */
@@ -7,6 +7,7 @@
 #include <linux/init.h>
 #include <linux/time.h>
 #include <linux/slab.h>
+#include <linux/mm.h>
 #include <linux/seq_file.h>
 #include <linux/pagemap.h>
 #include <linux/mpage.h>
@@ -3450,7 +3451,7 @@ static void exfat_free_super(struct exfat_sb_info *sbi)
                kfree(sbi->options.iocharset);
        /* mutex_init is in exfat_fill_super function. only for 3.7+ */
        mutex_destroy(&sbi->s_lock);
-       kfree(sbi);
+       kvfree(sbi);
 }
 
 static void exfat_put_super(struct super_block *sb)
@@ -3845,7 +3846,7 @@ static int exfat_fill_super(struct super_block *sb, void *data, int silent)
         * the filesystem, since we're only just about to mount
         * it and have no inodes etc active!
         */
-       sbi = kzalloc(sizeof(struct exfat_sb_info), GFP_KERNEL);
+       sbi = kvzalloc(sizeof(*sbi), GFP_KERNEL);
        if (!sbi)
                return -ENOMEM;
        mutex_init(&sbi->s_lock);
index 366082fb3dabc8c1ff5263fd246275921a7f8a74..b91a1faa0e503ada734fd51c50ece4be4c611a73 100644 (file)
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0-or-later
 /*
  *  Copyright (C) 2012-2013 Samsung Electronics Co., Ltd.
  */
index 8ec524a95ec8f6f2ec7c227408c4e0e1eef5074a..cb61c2a772bdd8dc0240cb59d634842ad87d3a20 100644 (file)
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 menuconfig FB_TFT
        tristate "Support for small TFT LCD display modules"
-       depends on FB && SPI
+       depends on FB && SPI && OF
        depends on GPIOLIB || COMPILE_TEST
        select FB_SYS_FILLRECT
        select FB_SYS_COPYAREA
@@ -199,13 +199,3 @@ config FB_TFT_WATTEROTT
        depends on FB_TFT
        help
          Generic Framebuffer support for WATTEROTT
-
-config FB_FLEX
-       tristate "Generic FB driver for TFT LCD displays"
-       depends on FB_TFT
-       help
-         Generic Framebuffer support for TFT LCD displays.
-
-config FB_TFT_FBTFT_DEVICE
-       tristate "Module to for adding FBTFT devices"
-       depends on FB_TFT
index 6bc03311c9c7e63d929dc5a287f9c7bee89d2737..27af43f32f81e198bc24e108c30c45b451909d28 100644 (file)
@@ -36,7 +36,3 @@ obj-$(CONFIG_FB_TFT_UC1611)      += fb_uc1611.o
 obj-$(CONFIG_FB_TFT_UC1701)      += fb_uc1701.o
 obj-$(CONFIG_FB_TFT_UPD161704)   += fb_upd161704.o
 obj-$(CONFIG_FB_TFT_WATTEROTT)   += fb_watterott.o
-obj-$(CONFIG_FB_FLEX)            += flexfb.o
-
-# Device modules
-obj-$(CONFIG_FB_TFT_FBTFT_DEVICE) += fbtft_device.o
index cf5700a2ea6688d6e73e07151fe40b5d4e6af7e8..a0a67aa517f0e483ce3e75745c11c75ecb5c1d3a 100644 (file)
@@ -714,7 +714,7 @@ struct fb_info *fbtft_framebuffer_alloc(struct fbtft_display *display,
        if (par->gamma.curves && gamma) {
                if (fbtft_gamma_parse_str(par, par->gamma.curves, gamma,
                                          strlen(gamma)))
-                       goto alloc_fail;
+                       goto release_framebuf;
        }
 
        /* Transmit buffer */
@@ -731,7 +731,7 @@ struct fb_info *fbtft_framebuffer_alloc(struct fbtft_display *display,
        if (txbuflen > 0) {
                txbuf = devm_kzalloc(par->info->device, txbuflen, GFP_KERNEL);
                if (!txbuf)
-                       goto alloc_fail;
+                       goto release_framebuf;
                par->txbuf.buf = txbuf;
                par->txbuf.len = txbuflen;
        }
@@ -753,6 +753,9 @@ struct fb_info *fbtft_framebuffer_alloc(struct fbtft_display *display,
 
        return info;
 
+release_framebuf:
+       framebuffer_release(info);
+
 alloc_fail:
        vfree(vmem);
 
diff --git a/drivers/staging/fbtft/fbtft_device.c b/drivers/staging/fbtft/fbtft_device.c
deleted file mode 100644 (file)
index 44e1410..0000000
+++ /dev/null
@@ -1,1261 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- *
- * Copyright (C) 2013, Noralf Tronnes
- */
-
-#define pr_fmt(fmt) "fbtft_device: " fmt
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/gpio/consumer.h>
-#include <linux/spi/spi.h>
-#include <video/mipi_display.h>
-
-#include "fbtft.h"
-
-#define MAX_GPIOS 32
-
-static struct spi_device *spi_device;
-static struct platform_device *p_device;
-
-static char *name;
-module_param(name, charp, 0000);
-MODULE_PARM_DESC(name,
-                "Devicename (required). name=list => list all supported devices.");
-
-static unsigned int rotate;
-module_param(rotate, uint, 0000);
-MODULE_PARM_DESC(rotate,
-                "Angle to rotate display counter clockwise: 0, 90, 180, 270");
-
-static unsigned int busnum;
-module_param(busnum, uint, 0000);
-MODULE_PARM_DESC(busnum, "SPI bus number (default=0)");
-
-static unsigned int cs;
-module_param(cs, uint, 0000);
-MODULE_PARM_DESC(cs, "SPI chip select (default=0)");
-
-static unsigned int speed;
-module_param(speed, uint, 0000);
-MODULE_PARM_DESC(speed, "SPI speed (override device default)");
-
-static int mode = -1;
-module_param(mode, int, 0000);
-MODULE_PARM_DESC(mode, "SPI mode (override device default)");
-
-static unsigned int fps;
-module_param(fps, uint, 0000);
-MODULE_PARM_DESC(fps, "Frames per second (override driver default)");
-
-static char *gamma;
-module_param(gamma, charp, 0000);
-MODULE_PARM_DESC(gamma,
-                "String representation of Gamma Curve(s). Driver specific.");
-
-static int txbuflen;
-module_param(txbuflen, int, 0000);
-MODULE_PARM_DESC(txbuflen, "txbuflen (override driver default)");
-
-static int bgr = -1;
-module_param(bgr, int, 0000);
-MODULE_PARM_DESC(bgr,
-                "BGR bit (supported by some drivers).");
-
-static unsigned int startbyte;
-module_param(startbyte, uint, 0000);
-MODULE_PARM_DESC(startbyte, "Sets the Start byte used by some SPI displays.");
-
-static bool custom;
-module_param(custom, bool, 0000);
-MODULE_PARM_DESC(custom, "Add a custom display device. Use speed= argument to make it a SPI device, else platform_device");
-
-static unsigned int width;
-module_param(width, uint, 0000);
-MODULE_PARM_DESC(width, "Display width, used with the custom argument");
-
-static unsigned int height;
-module_param(height, uint, 0000);
-MODULE_PARM_DESC(height, "Display height, used with the custom argument");
-
-static unsigned int buswidth = 8;
-module_param(buswidth, uint, 0000);
-MODULE_PARM_DESC(buswidth, "Display bus width, used with the custom argument");
-
-static s16 init[FBTFT_MAX_INIT_SEQUENCE];
-static int init_num;
-module_param_array(init, short, &init_num, 0000);
-MODULE_PARM_DESC(init, "Init sequence, used with the custom argument");
-
-static unsigned long debug;
-module_param(debug, ulong, 0000);
-MODULE_PARM_DESC(debug,
-                "level: 0-7 (the remaining 29 bits is for advanced usage)");
-
-static unsigned int verbose = 3;
-module_param(verbose, uint, 0000);
-MODULE_PARM_DESC(verbose,
-                "0 silent, >1 show devices, >2 show devices before (default=3)");
-
-struct fbtft_device_display {
-       char *name;
-       struct spi_board_info *spi;
-       struct platform_device *pdev;
-};
-
-static void fbtft_device_pdev_release(struct device *dev);
-
-static int write_gpio16_wr_slow(struct fbtft_par *par, void *buf, size_t len);
-static void adafruit18_green_tab_set_addr_win(struct fbtft_par *par,
-                                             int xs, int ys, int xe, int ye);
-
-#define ADAFRUIT18_GAMMA \
-               "02 1c 07 12 37 32 29 2d 29 25 2B 39 00 01 03 10\n" \
-               "03 1d 07 06 2E 2C 29 2D 2E 2E 37 3F 00 00 02 10"
-
-#define CBERRY28_GAMMA \
-               "D0 00 14 15 13 2C 42 43 4E 09 16 14 18 21\n" \
-               "D0 00 14 15 13 0B 43 55 53 0C 17 14 23 20"
-
-static const s16 cberry28_init_sequence[] = {
-       /* turn off sleep mode */
-       -1, MIPI_DCS_EXIT_SLEEP_MODE,
-       -2, 120,
-
-       /* set pixel format to RGB-565 */
-       -1, MIPI_DCS_SET_PIXEL_FORMAT, MIPI_DCS_PIXEL_FMT_16BIT,
-
-       -1, 0xB2, 0x0C, 0x0C, 0x00, 0x33, 0x33,
-
-       /*
-        * VGH = 13.26V
-        * VGL = -10.43V
-        */
-       -1, 0xB7, 0x35,
-
-       /*
-        * VDV and VRH register values come from command write
-        * (instead of NVM)
-        */
-       -1, 0xC2, 0x01, 0xFF,
-
-       /*
-        * VAP =  4.7V + (VCOM + VCOM offset + 0.5 * VDV)
-        * VAN = -4.7V + (VCOM + VCOM offset + 0.5 * VDV)
-        */
-       -1, 0xC3, 0x17,
-
-       /* VDV = 0V */
-       -1, 0xC4, 0x20,
-
-       /* VCOM = 0.675V */
-       -1, 0xBB, 0x17,
-
-       /* VCOM offset = 0V */
-       -1, 0xC5, 0x20,
-
-       /*
-        * AVDD = 6.8V
-        * AVCL = -4.8V
-        * VDS = 2.3V
-        */
-       -1, 0xD0, 0xA4, 0xA1,
-
-       -1, MIPI_DCS_SET_DISPLAY_ON,
-
-       -3,
-};
-
-static const s16 hy28b_init_sequence[] = {
-       -1, 0x00e7, 0x0010, -1, 0x0000, 0x0001,
-       -1, 0x0001, 0x0100, -1, 0x0002, 0x0700,
-       -1, 0x0003, 0x1030, -1, 0x0004, 0x0000,
-       -1, 0x0008, 0x0207, -1, 0x0009, 0x0000,
-       -1, 0x000a, 0x0000, -1, 0x000c, 0x0001,
-       -1, 0x000d, 0x0000, -1, 0x000f, 0x0000,
-       -1, 0x0010, 0x0000, -1, 0x0011, 0x0007,
-       -1, 0x0012, 0x0000, -1, 0x0013, 0x0000,
-       -2, 50, -1, 0x0010, 0x1590, -1, 0x0011,
-       0x0227, -2, 50, -1, 0x0012, 0x009c, -2, 50,
-       -1, 0x0013, 0x1900, -1, 0x0029, 0x0023,
-       -1, 0x002b, 0x000e, -2, 50,
-       -1, 0x0020, 0x0000, -1, 0x0021, 0x0000,
-       -2, 50, -1, 0x0050, 0x0000,
-       -1, 0x0051, 0x00ef, -1, 0x0052, 0x0000,
-       -1, 0x0053, 0x013f, -1, 0x0060, 0xa700,
-       -1, 0x0061, 0x0001, -1, 0x006a, 0x0000,
-       -1, 0x0080, 0x0000, -1, 0x0081, 0x0000,
-       -1, 0x0082, 0x0000, -1, 0x0083, 0x0000,
-       -1, 0x0084, 0x0000, -1, 0x0085, 0x0000,
-       -1, 0x0090, 0x0010, -1, 0x0092, 0x0000,
-       -1, 0x0093, 0x0003, -1, 0x0095, 0x0110,
-       -1, 0x0097, 0x0000, -1, 0x0098, 0x0000,
-       -1, 0x0007, 0x0133, -1, 0x0020, 0x0000,
-       -1, 0x0021, 0x0000, -2, 100, -3 };
-
-#define HY28B_GAMMA \
-       "04 1F 4 7 7 0 7 7 6 0\n" \
-       "0F 00 1 7 4 0 0 0 6 7"
-
-static const s16 pitft_init_sequence[] = {
-       -1, MIPI_DCS_SOFT_RESET,
-       -2, 5,
-       -1, MIPI_DCS_SET_DISPLAY_OFF,
-       -1, 0xEF, 0x03, 0x80, 0x02,
-       -1, 0xCF, 0x00, 0xC1, 0x30,
-       -1, 0xED, 0x64, 0x03, 0x12, 0x81,
-       -1, 0xE8, 0x85, 0x00, 0x78,
-       -1, 0xCB, 0x39, 0x2C, 0x00, 0x34, 0x02,
-       -1, 0xF7, 0x20,
-       -1, 0xEA, 0x00, 0x00,
-       -1, 0xC0, 0x23,
-       -1, 0xC1, 0x10,
-       -1, 0xC5, 0x3E, 0x28,
-       -1, 0xC7, 0x86,
-       -1, MIPI_DCS_SET_PIXEL_FORMAT, 0x55,
-       -1, 0xB1, 0x00, 0x18,
-       -1, 0xB6, 0x08, 0x82, 0x27,
-       -1, 0xF2, 0x00,
-       -1, MIPI_DCS_SET_GAMMA_CURVE, 0x01,
-       -1, 0xE0, 0x0F, 0x31, 0x2B, 0x0C, 0x0E, 0x08, 0x4E,
-               0xF1, 0x37, 0x07, 0x10, 0x03, 0x0E, 0x09, 0x00,
-       -1, 0xE1, 0x00, 0x0E, 0x14, 0x03, 0x11, 0x07, 0x31,
-               0xC1, 0x48, 0x08, 0x0F, 0x0C, 0x31, 0x36, 0x0F,
-       -1, MIPI_DCS_EXIT_SLEEP_MODE,
-       -2, 100,
-       -1, MIPI_DCS_SET_DISPLAY_ON,
-       -2, 20,
-       -3
-};
-
-static const s16 waveshare32b_init_sequence[] = {
-       -1, 0xCB, 0x39, 0x2C, 0x00, 0x34, 0x02,
-       -1, 0xCF, 0x00, 0xC1, 0x30,
-       -1, 0xE8, 0x85, 0x00, 0x78,
-       -1, 0xEA, 0x00, 0x00,
-       -1, 0xED, 0x64, 0x03, 0x12, 0x81,
-       -1, 0xF7, 0x20,
-       -1, 0xC0, 0x23,
-       -1, 0xC1, 0x10,
-       -1, 0xC5, 0x3E, 0x28,
-       -1, 0xC7, 0x86,
-       -1, MIPI_DCS_SET_ADDRESS_MODE, 0x28,
-       -1, MIPI_DCS_SET_PIXEL_FORMAT, 0x55,
-       -1, 0xB1, 0x00, 0x18,
-       -1, 0xB6, 0x08, 0x82, 0x27,
-       -1, 0xF2, 0x00,
-       -1, MIPI_DCS_SET_GAMMA_CURVE, 0x01,
-       -1, 0xE0, 0x0F, 0x31, 0x2B, 0x0C, 0x0E, 0x08, 0x4E,
-               0xF1, 0x37, 0x07, 0x10, 0x03, 0x0E, 0x09, 0x00,
-       -1, 0xE1, 0x00, 0x0E, 0x14, 0x03, 0x11, 0x07, 0x31,
-               0xC1, 0x48, 0x08, 0x0F, 0x0C, 0x31, 0x36, 0x0F,
-       -1, MIPI_DCS_EXIT_SLEEP_MODE,
-       -2, 120,
-       -1, MIPI_DCS_SET_DISPLAY_ON,
-       -1, MIPI_DCS_WRITE_MEMORY_START,
-       -3
-};
-
-#define PIOLED_GAMMA   "0 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 2 " \
-                       "2 2 2 2 2 2 2 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 " \
-                       "3 3 3 4 4 4 4 4 4 4 4 4 4 4 4"
-
-/* Supported displays in alphabetical order */
-static struct fbtft_device_display displays[] = {
-       {
-               .name = "adafruit18",
-               .spi = &(struct spi_board_info) {
-                       .modalias = "fb_st7735r",
-                       .max_speed_hz = 32000000,
-                       .mode = SPI_MODE_0,
-                       .platform_data = &(struct fbtft_platform_data) {
-                               .display = {
-                                       .buswidth = 8,
-                                       .backlight = 1,
-                               },
-                               .gamma = ADAFRUIT18_GAMMA,
-                       }
-               }
-       }, {
-               .name = "adafruit18_green",
-               .spi = &(struct spi_board_info) {
-                       .modalias = "fb_st7735r",
-                       .max_speed_hz = 4000000,
-                       .mode = SPI_MODE_0,
-                       .platform_data = &(struct fbtft_platform_data) {
-                               .display = {
-                                       .buswidth = 8,
-                                       .backlight = 1,
-                                       .fbtftops.set_addr_win =
-                                           adafruit18_green_tab_set_addr_win,
-                               },
-                               .bgr = true,
-                               .gamma = ADAFRUIT18_GAMMA,
-                       }
-               }
-       }, {
-               .name = "adafruit22",
-               .spi = &(struct spi_board_info) {
-                       .modalias = "fb_hx8340bn",
-                       .max_speed_hz = 32000000,
-                       .mode = SPI_MODE_0,
-                       .platform_data = &(struct fbtft_platform_data) {
-                               .display = {
-                                       .buswidth = 9,
-                                       .backlight = 1,
-                               },
-                               .bgr = true,
-                       }
-               }
-       }, {
-               .name = "adafruit22a",
-               .spi = &(struct spi_board_info) {
-                       .modalias = "fb_ili9340",
-                       .max_speed_hz = 32000000,
-                       .mode = SPI_MODE_0,
-                       .platform_data = &(struct fbtft_platform_data) {
-                               .display = {
-                                       .buswidth = 8,
-                                       .backlight = 1,
-                               },
-                               .bgr = true,
-                       }
-               }
-       }, {
-               .name = "adafruit28",
-               .spi = &(struct spi_board_info) {
-                       .modalias = "fb_ili9341",
-                       .max_speed_hz = 32000000,
-                       .mode = SPI_MODE_0,
-                       .platform_data = &(struct fbtft_platform_data) {
-                               .display = {
-                                       .buswidth = 8,
-                                       .backlight = 1,
-                               },
-                               .bgr = true,
-                       }
-               }
-       }, {
-               .name = "adafruit13m",
-               .spi = &(struct spi_board_info) {
-                       .modalias = "fb_ssd1306",
-                       .max_speed_hz = 16000000,
-                       .mode = SPI_MODE_0,
-                       .platform_data = &(struct fbtft_platform_data) {
-                               .display = {
-                                       .buswidth = 8,
-                               },
-                       }
-               }
-       }, {
-               .name = "admatec_c-berry28",
-               .spi = &(struct spi_board_info) {
-                       .modalias = "fb_st7789v",
-                       .max_speed_hz = 48000000,
-                       .mode = SPI_MODE_0,
-                       .platform_data = &(struct fbtft_platform_data) {
-                               .display = {
-                                       .buswidth = 8,
-                                       .backlight = 1,
-                                       .init_sequence = cberry28_init_sequence,
-                               },
-                               .gamma = CBERRY28_GAMMA,
-                       }
-               }
-       }, {
-               .name = "agm1264k-fl",
-               .pdev = &(struct platform_device) {
-                       .name = "fb_agm1264k-fl",
-                       .id = 0,
-                       .dev = {
-                       .release = fbtft_device_pdev_release,
-                       .platform_data = &(struct fbtft_platform_data) {
-                               .display = {
-                                       .buswidth = 8,
-                                       .backlight = FBTFT_ONBOARD_BACKLIGHT,
-                               },
-                       },
-                       }
-               }
-       }, {
-               .name = "dogs102",
-               .spi = &(struct spi_board_info) {
-                       .modalias = "fb_uc1701",
-                       .max_speed_hz = 8000000,
-                       .mode = SPI_MODE_0,
-                       .platform_data = &(struct fbtft_platform_data) {
-                               .display = {
-                                       .buswidth = 8,
-                               },
-                               .bgr = true,
-                       }
-               }
-       }, {
-               .name = "er_tftm050_2",
-               .spi = &(struct spi_board_info) {
-                       .modalias = "fb_ra8875",
-                       .max_speed_hz = 5000000,
-                       .mode = SPI_MODE_3,
-                       .platform_data = &(struct fbtft_platform_data) {
-                               .display = {
-                                       .buswidth = 8,
-                                       .backlight = 1,
-                                       .width = 480,
-                                       .height = 272,
-                               },
-                               .bgr = true,
-                       }
-               }
-       }, {
-               .name = "er_tftm070_5",
-               .spi = &(struct spi_board_info) {
-                       .modalias = "fb_ra8875",
-                       .max_speed_hz = 5000000,
-                       .mode = SPI_MODE_3,
-                       .platform_data = &(struct fbtft_platform_data) {
-                               .display = {
-                                       .buswidth = 8,
-                                       .backlight = 1,
-                                       .width = 800,
-                                       .height = 480,
-                               },
-                               .bgr = true,
-                       }
-               }
-       }, {
-               .name = "ew24ha0",
-               .spi = &(struct spi_board_info) {
-                       .modalias = "fb_uc1611",
-                       .max_speed_hz = 32000000,
-                       .mode = SPI_MODE_3,
-                       .platform_data = &(struct fbtft_platform_data) {
-                               .display = {
-                                       .buswidth = 8,
-                               },
-                       }
-               }
-       }, {
-               .name = "ew24ha0_9bit",
-               .spi = &(struct spi_board_info) {
-                       .modalias = "fb_uc1611",
-                       .max_speed_hz = 32000000,
-                       .mode = SPI_MODE_3,
-                       .platform_data = &(struct fbtft_platform_data) {
-                               .display = {
-                                       .buswidth = 9,
-                               },
-                       }
-               }
-       }, {
-               .name = "flexfb",
-               .spi = &(struct spi_board_info) {
-                       .modalias = "flexfb",
-                       .max_speed_hz = 32000000,
-                       .mode = SPI_MODE_0,
-               }
-       }, {
-               .name = "flexpfb",
-               .pdev = &(struct platform_device) {
-                       .name = "flexpfb",
-                       .id = 0,
-                       .dev = {
-                       .release = fbtft_device_pdev_release,
-                       }
-               }
-       }, {
-               .name = "freetronicsoled128",
-               .spi = &(struct spi_board_info) {
-                       .modalias = "fb_ssd1351",
-                       .max_speed_hz = 20000000,
-                       .mode = SPI_MODE_0,
-                       .platform_data = &(struct fbtft_platform_data) {
-                               .display = {
-                                       .buswidth = 8,
-                                       .backlight = FBTFT_ONBOARD_BACKLIGHT,
-                               },
-                               .bgr = true,
-                       }
-               }
-       }, {
-               .name = "hx8353d",
-               .spi = &(struct spi_board_info) {
-                       .modalias = "fb_hx8353d",
-                       .max_speed_hz = 16000000,
-                       .mode = SPI_MODE_0,
-                       .platform_data = &(struct fbtft_platform_data) {
-                               .display = {
-                                       .buswidth = 8,
-                                       .backlight = 1,
-                               },
-                       }
-               }
-       }, {
-               .name = "hy28a",
-               .spi = &(struct spi_board_info) {
-                       .modalias = "fb_ili9320",
-                       .max_speed_hz = 32000000,
-                       .mode = SPI_MODE_3,
-                       .platform_data = &(struct fbtft_platform_data) {
-                               .display = {
-                                       .buswidth = 8,
-                                       .backlight = 1,
-                               },
-                               .startbyte = 0x70,
-                               .bgr = true,
-                       }
-               }
-       }, {
-               .name = "hy28b",
-               .spi = &(struct spi_board_info) {
-                       .modalias = "fb_ili9325",
-                       .max_speed_hz = 48000000,
-                       .mode = SPI_MODE_3,
-                       .platform_data = &(struct fbtft_platform_data) {
-                               .display = {
-                                       .buswidth = 8,
-                                       .backlight = 1,
-                                       .init_sequence = hy28b_init_sequence,
-                               },
-                               .startbyte = 0x70,
-                               .bgr = true,
-                               .fps = 50,
-                               .gamma = HY28B_GAMMA,
-                       }
-               }
-       }, {
-               .name = "ili9481",
-               .spi = &(struct spi_board_info) {
-                       .modalias = "fb_ili9481",
-                       .max_speed_hz = 32000000,
-                       .mode = SPI_MODE_0,
-                       .platform_data = &(struct fbtft_platform_data) {
-                               .display = {
-                                       .regwidth = 16,
-                                       .buswidth = 8,
-                                       .backlight = 1,
-                               },
-                               .bgr = true,
-                       }
-               }
-       }, {
-               .name = "itdb24",
-               .pdev = &(struct platform_device) {
-                       .name = "fb_s6d1121",
-                       .id = 0,
-                       .dev = {
-                       .release = fbtft_device_pdev_release,
-                       .platform_data = &(struct fbtft_platform_data) {
-                               .display = {
-                                       .buswidth = 8,
-                                       .backlight = 1,
-                               },
-                               .bgr = false,
-                       },
-                       }
-               }
-       }, {
-               .name = "itdb28",
-               .pdev = &(struct platform_device) {
-                       .name = "fb_ili9325",
-                       .id = 0,
-                       .dev = {
-                       .release = fbtft_device_pdev_release,
-                       .platform_data = &(struct fbtft_platform_data) {
-                               .display = {
-                                       .buswidth = 8,
-                                       .backlight = 1,
-                               },
-                               .bgr = true,
-                       },
-                       }
-               }
-       }, {
-               .name = "itdb28_spi",
-               .spi = &(struct spi_board_info) {
-                       .modalias = "fb_ili9325",
-                       .max_speed_hz = 32000000,
-                       .mode = SPI_MODE_0,
-                       .platform_data = &(struct fbtft_platform_data) {
-                               .display = {
-                                       .buswidth = 8,
-                                       .backlight = 1,
-                               },
-                               .bgr = true,
-                       }
-               }
-       }, {
-               .name = "mi0283qt-2",
-               .spi = &(struct spi_board_info) {
-                       .modalias = "fb_hx8347d",
-                       .max_speed_hz = 32000000,
-                       .mode = SPI_MODE_0,
-                       .platform_data = &(struct fbtft_platform_data) {
-                               .display = {
-                                       .buswidth = 8,
-                                       .backlight = 1,
-                               },
-                               .startbyte = 0x70,
-                               .bgr = true,
-                       }
-               }
-       }, {
-               .name = "mi0283qt-9a",
-               .spi = &(struct spi_board_info) {
-                       .modalias = "fb_ili9341",
-                       .max_speed_hz = 32000000,
-                       .mode = SPI_MODE_0,
-                       .platform_data = &(struct fbtft_platform_data) {
-                               .display = {
-                                       .buswidth = 9,
-                                       .backlight = 1,
-                               },
-                               .bgr = true,
-                       }
-               }
-       }, {
-               .name = "mi0283qt-v2",
-               .spi = &(struct spi_board_info) {
-                       .modalias = "fb_watterott",
-                       .max_speed_hz = 4000000,
-                       .mode = SPI_MODE_3,
-                       .platform_data = &(struct fbtft_platform_data) {
-                       }
-               }
-       }, {
-               .name = "nokia3310",
-               .spi = &(struct spi_board_info) {
-                       .modalias = "fb_pcd8544",
-                       .max_speed_hz = 400000,
-                       .mode = SPI_MODE_0,
-                       .platform_data = &(struct fbtft_platform_data) {
-                               .display = {
-                                       .buswidth = 8,
-                               },
-                       }
-               }
-       }, {
-               .name = "nokia3310a",
-               .spi = &(struct spi_board_info) {
-                       .modalias = "fb_tls8204",
-                       .max_speed_hz = 1000000,
-                       .mode = SPI_MODE_0,
-                       .platform_data = &(struct fbtft_platform_data) {
-                               .display = {
-                                       .buswidth = 8,
-                               },
-                       }
-               }
-       }, {
-               .name = "nokia5110",
-               .spi = &(struct spi_board_info) {
-                       .modalias = "fb_ili9163",
-                       .max_speed_hz = 12000000,
-                       .mode = SPI_MODE_0,
-                       .platform_data = &(struct fbtft_platform_data) {
-                               .display = {
-                                       .buswidth = 8,
-                                       .backlight = 1,
-                               },
-                               .bgr = true,
-                       }
-               }
-       }, {
-               .name = "piscreen",
-               .spi = &(struct spi_board_info) {
-                       .modalias = "fb_ili9486",
-                       .max_speed_hz = 32000000,
-                       .mode = SPI_MODE_0,
-                       .platform_data = &(struct fbtft_platform_data) {
-                               .display = {
-                                       .regwidth = 16,
-                                       .buswidth = 8,
-                                       .backlight = 1,
-                               },
-                               .bgr = true,
-                       }
-               }
-       }, {
-               .name = "pitft",
-               .spi = &(struct spi_board_info) {
-                       .modalias = "fb_ili9340",
-                       .max_speed_hz = 32000000,
-                       .mode = SPI_MODE_0,
-                       .chip_select = 0,
-                       .platform_data = &(struct fbtft_platform_data) {
-                               .display = {
-                                       .buswidth = 8,
-                                       .backlight = 1,
-                                       .init_sequence = pitft_init_sequence,
-                               },
-                               .bgr = true,
-                       }
-               }
-       }, {
-               .name = "pioled",
-               .spi = &(struct spi_board_info) {
-                       .modalias = "fb_ssd1351",
-                       .max_speed_hz = 20000000,
-                       .mode = SPI_MODE_0,
-                       .platform_data = &(struct fbtft_platform_data) {
-                               .display = {
-                                       .buswidth = 8,
-                               },
-                               .bgr = true,
-                               .gamma = PIOLED_GAMMA
-                       }
-               }
-       }, {
-               .name = "rpi-display",
-               .spi = &(struct spi_board_info) {
-                       .modalias = "fb_ili9341",
-                       .max_speed_hz = 32000000,
-                       .mode = SPI_MODE_0,
-                       .platform_data = &(struct fbtft_platform_data) {
-                               .display = {
-                                       .buswidth = 8,
-                                       .backlight = 1,
-                               },
-                               .bgr = true,
-                       }
-               }
-       }, {
-               .name = "s6d02a1",
-               .spi = &(struct spi_board_info) {
-                       .modalias = "fb_s6d02a1",
-                       .max_speed_hz = 32000000,
-                       .mode = SPI_MODE_0,
-                       .platform_data = &(struct fbtft_platform_data) {
-                               .display = {
-                                       .buswidth = 8,
-                                       .backlight = 1,
-                               },
-                               .bgr = true,
-                       }
-               }
-       }, {
-               .name = "sainsmart18",
-               .spi = &(struct spi_board_info) {
-                       .modalias = "fb_st7735r",
-                       .max_speed_hz = 32000000,
-                       .mode = SPI_MODE_0,
-                       .platform_data = &(struct fbtft_platform_data) {
-                               .display = {
-                                       .buswidth = 8,
-                               },
-                       }
-               }
-       }, {
-               .name = "sainsmart32",
-               .pdev = &(struct platform_device) {
-                       .name = "fb_ssd1289",
-                       .id = 0,
-                       .dev = {
-                       .release = fbtft_device_pdev_release,
-                       .platform_data = &(struct fbtft_platform_data) {
-                               .display = {
-                                       .buswidth = 16,
-                                       .txbuflen = -2, /* disable buffer */
-                                       .backlight = 1,
-                                       .fbtftops.write = write_gpio16_wr_slow,
-                               },
-                               .bgr = true,
-                       },
-               },
-               }
-       }, {
-               .name = "sainsmart32_fast",
-               .pdev = &(struct platform_device) {
-                       .name = "fb_ssd1289",
-                       .id = 0,
-                       .dev = {
-                       .release = fbtft_device_pdev_release,
-                       .platform_data = &(struct fbtft_platform_data) {
-                               .display = {
-                                       .buswidth = 16,
-                                       .txbuflen = -2, /* disable buffer */
-                                       .backlight = 1,
-                               },
-                               .bgr = true,
-                       },
-               },
-               }
-       }, {
-               .name = "sainsmart32_latched",
-               .pdev = &(struct platform_device) {
-                       .name = "fb_ssd1289",
-                       .id = 0,
-                       .dev = {
-                       .release = fbtft_device_pdev_release,
-                       .platform_data = &(struct fbtft_platform_data) {
-                               .display = {
-                                       .buswidth = 16,
-                                       .txbuflen = -2, /* disable buffer */
-                                       .backlight = 1,
-                                       .fbtftops.write =
-                                               fbtft_write_gpio16_wr_latched,
-                               },
-                               .bgr = true,
-                       },
-               },
-               }
-       }, {
-               .name = "sainsmart32_spi",
-               .spi = &(struct spi_board_info) {
-                       .modalias = "fb_ssd1289",
-                       .max_speed_hz = 16000000,
-                       .mode = SPI_MODE_0,
-                       .platform_data = &(struct fbtft_platform_data) {
-                               .display = {
-                                       .buswidth = 8,
-                                       .backlight = 1,
-                               },
-                               .bgr = true,
-                       }
-               }
-       }, {
-               .name = "spidev",
-               .spi = &(struct spi_board_info) {
-                       .modalias = "spidev",
-                       .max_speed_hz = 500000,
-                       .bus_num = 0,
-                       .chip_select = 0,
-                       .mode = SPI_MODE_0,
-                       .platform_data = &(struct fbtft_platform_data) {
-                       }
-               }
-       }, {
-               .name = "ssd1331",
-               .spi = &(struct spi_board_info) {
-                       .modalias = "fb_ssd1331",
-                       .max_speed_hz = 20000000,
-                       .mode = SPI_MODE_3,
-                       .platform_data = &(struct fbtft_platform_data) {
-                               .display = {
-                                       .buswidth = 8,
-                               },
-                       }
-               }
-       }, {
-               .name = "tinylcd35",
-               .spi = &(struct spi_board_info) {
-                       .modalias = "fb_tinylcd",
-                       .max_speed_hz = 32000000,
-                       .mode = SPI_MODE_0,
-                       .platform_data = &(struct fbtft_platform_data) {
-                               .display = {
-                                       .buswidth = 8,
-                                       .backlight = 1,
-                               },
-                               .bgr = true,
-                       }
-               }
-       }, {
-               .name = "tm022hdh26",
-               .spi = &(struct spi_board_info) {
-                       .modalias = "fb_ili9341",
-                       .max_speed_hz = 32000000,
-                       .mode = SPI_MODE_0,
-                       .platform_data = &(struct fbtft_platform_data) {
-                               .display = {
-                                       .buswidth = 8,
-                                       .backlight = 1,
-                               },
-                               .bgr = true,
-                       }
-               }
-       }, {
-               .name = "tontec35_9481", /* boards before 02 July 2014 */
-               .spi = &(struct spi_board_info) {
-                       .modalias = "fb_ili9481",
-                       .max_speed_hz = 128000000,
-                       .mode = SPI_MODE_3,
-                       .platform_data = &(struct fbtft_platform_data) {
-                               .display = {
-                                       .buswidth = 8,
-                                       .backlight = 1,
-                               },
-                               .bgr = true,
-                       }
-               }
-       }, {
-               .name = "tontec35_9486", /* boards after 02 July 2014 */
-               .spi = &(struct spi_board_info) {
-                       .modalias = "fb_ili9486",
-                       .max_speed_hz = 128000000,
-                       .mode = SPI_MODE_3,
-                       .platform_data = &(struct fbtft_platform_data) {
-                               .display = {
-                                       .buswidth = 8,
-                                       .backlight = 1,
-                               },
-                               .bgr = true,
-                       }
-               }
-       }, {
-               .name = "upd161704",
-               .spi = &(struct spi_board_info) {
-                       .modalias = "fb_upd161704",
-                       .max_speed_hz = 32000000,
-                       .mode = SPI_MODE_0,
-                       .platform_data = &(struct fbtft_platform_data) {
-                               .display = {
-                                       .buswidth = 8,
-                               },
-                       }
-               }
-       }, {
-               .name = "waveshare32b",
-               .spi = &(struct spi_board_info) {
-                       .modalias = "fb_ili9340",
-                       .max_speed_hz = 48000000,
-                       .mode = SPI_MODE_0,
-                       .platform_data = &(struct fbtft_platform_data) {
-                               .display = {
-                                       .buswidth = 8,
-                                       .backlight = 1,
-                                       .init_sequence =
-                                               waveshare32b_init_sequence,
-                               },
-                               .bgr = true,
-                       }
-               }
-       }, {
-               .name = "waveshare22",
-               .spi = &(struct spi_board_info) {
-                       .modalias = "fb_bd663474",
-                       .max_speed_hz = 32000000,
-                       .mode = SPI_MODE_3,
-                       .platform_data = &(struct fbtft_platform_data) {
-                               .display = {
-                                       .buswidth = 8,
-                               },
-                       }
-               }
-       }, {
-               /* This should be the last item.
-                * Used with the custom argument
-                */
-               .name = "",
-               .spi = &(struct spi_board_info) {
-                       .modalias = "",
-                       .max_speed_hz = 0,
-                       .mode = SPI_MODE_0,
-                       .platform_data = &(struct fbtft_platform_data) {
-                       }
-               },
-               .pdev = &(struct platform_device) {
-                       .name = "",
-                       .id = 0,
-                       .dev = {
-                               .release = fbtft_device_pdev_release,
-                               .platform_data = &(struct fbtft_platform_data) {
-                               },
-                       },
-               },
-       }
-};
-
-static int write_gpio16_wr_slow(struct fbtft_par *par, void *buf, size_t len)
-{
-       u16 data;
-       int i;
-#ifndef DO_NOT_OPTIMIZE_FBTFT_WRITE_GPIO
-       static u16 prev_data;
-#endif
-
-       fbtft_par_dbg_hex(DEBUG_WRITE, par, par->info->device, u8, buf, len,
-                         "%s(len=%zu): ", __func__, len);
-
-       while (len) {
-               data = *(u16 *)buf;
-
-               /* Start writing by pulling down /WR */
-               gpiod_set_value(par->gpio.wr, 0);
-
-               /* Set data */
-#ifndef DO_NOT_OPTIMIZE_FBTFT_WRITE_GPIO
-               if (data == prev_data) {
-                       gpiod_set_value(par->gpio.wr, 0); /* used as delay */
-               } else {
-                       for (i = 0; i < 16; i++) {
-                               if ((data & 1) != (prev_data & 1))
-                                       gpiod_set_value(par->gpio.db[i],
-                                                       data & 1);
-                               data >>= 1;
-                               prev_data >>= 1;
-                       }
-               }
-#else
-               for (i = 0; i < 16; i++) {
-                       gpiod_set_value(par->gpio.db[i], data & 1);
-                       data >>= 1;
-               }
-#endif
-
-               /* Pullup /WR */
-               gpiod_set_value(par->gpio.wr, 1);
-
-#ifndef DO_NOT_OPTIMIZE_FBTFT_WRITE_GPIO
-               prev_data = *(u16 *)buf;
-#endif
-               buf += 2;
-               len -= 2;
-       }
-
-       return 0;
-}
-
-static void adafruit18_green_tab_set_addr_win(struct fbtft_par *par,
-                                             int xs, int ys, int xe, int ye)
-{
-       write_reg(par, 0x2A, 0, xs + 2, 0, xe + 2);
-       write_reg(par, 0x2B, 0, ys + 1, 0, ye + 1);
-       write_reg(par, 0x2C);
-}
-
-static void fbtft_device_pdev_release(struct device *dev)
-{
-/* Needed to silence this message:
- * Device 'xxx' does not have a release() function,
- * it is broken and must be fixed
- */
-}
-
-static int spi_device_found(struct device *dev, void *data)
-{
-       struct spi_device *spi = to_spi_device(dev);
-
-       dev_info(dev, "%s %s %dkHz %d bits mode=0x%02X\n", spi->modalias,
-                dev_name(dev), spi->max_speed_hz / 1000, spi->bits_per_word,
-                spi->mode);
-
-       return 0;
-}
-
-static void pr_spi_devices(void)
-{
-       pr_debug("SPI devices registered:\n");
-       bus_for_each_dev(&spi_bus_type, NULL, NULL, spi_device_found);
-}
-
-static int p_device_found(struct device *dev, void *data)
-{
-       struct platform_device
-       *pdev = to_platform_device(dev);
-
-       if (strstr(pdev->name, "fb"))
-               dev_info(dev, "%s id=%d pdata? %s\n", pdev->name, pdev->id,
-                        pdev->dev.platform_data ? "yes" : "no");
-
-       return 0;
-}
-
-static void pr_p_devices(void)
-{
-       pr_debug("'fb' Platform devices registered:\n");
-       bus_for_each_dev(&platform_bus_type, NULL, NULL, p_device_found);
-}
-
-#ifdef MODULE
-static void fbtft_device_spi_delete(struct spi_master *master, unsigned int cs)
-{
-       struct device *dev;
-       char str[32];
-
-       snprintf(str, sizeof(str), "%s.%u", dev_name(&master->dev), cs);
-
-       dev = bus_find_device_by_name(&spi_bus_type, NULL, str);
-       if (dev) {
-               if (verbose)
-                       dev_info(dev, "Deleting %s\n", str);
-               device_del(dev);
-       }
-}
-
-static int fbtft_device_spi_device_register(struct spi_board_info *spi)
-{
-       struct spi_master *master;
-
-       master = spi_busnum_to_master(spi->bus_num);
-       if (!master) {
-               pr_err("spi_busnum_to_master(%d) returned NULL\n",
-                      spi->bus_num);
-               return -EINVAL;
-       }
-       /* make sure it's available */
-       fbtft_device_spi_delete(master, spi->chip_select);
-       spi_device = spi_new_device(master, spi);
-       put_device(&master->dev);
-       if (!spi_device) {
-               dev_err(&master->dev, "spi_new_device() returned NULL\n");
-               return -EPERM;
-       }
-       return 0;
-}
-#else
-static int fbtft_device_spi_device_register(struct spi_board_info *spi)
-{
-       return spi_register_board_info(spi, 1);
-}
-#endif
-
-static int __init fbtft_device_init(void)
-{
-       struct spi_board_info *spi = NULL;
-       struct fbtft_platform_data *pdata;
-       bool found = false;
-       int i = 0;
-       int ret = 0;
-
-       if (!name) {
-#ifdef MODULE
-               pr_err("missing module parameter: 'name'\n");
-               return -EINVAL;
-#else
-               return 0;
-#endif
-       }
-
-       if (init_num > FBTFT_MAX_INIT_SEQUENCE) {
-               pr_err("init parameter: exceeded max array size: %d\n",
-                      FBTFT_MAX_INIT_SEQUENCE);
-               return -EINVAL;
-       }
-
-       if (verbose > 2) {
-               pr_spi_devices(); /* print list of registered SPI devices */
-               pr_p_devices(); /* print list of 'fb' platform devices */
-       }
-
-       pr_debug("name='%s', busnum=%d, cs=%d\n", name, busnum, cs);
-
-       if (rotate > 0 && rotate < 4) {
-               rotate = (4 - rotate) * 90;
-               pr_warn("argument 'rotate' should be an angle. Values 1-3 is deprecated. Setting it to %d.\n",
-                       rotate);
-       }
-       if (rotate != 0 && rotate != 90 && rotate != 180 && rotate != 270) {
-               pr_warn("argument 'rotate' illegal value: %d. Setting it to 0.\n",
-                       rotate);
-               rotate = 0;
-       }
-
-       /* name=list lists all supported displays */
-       if (strcmp(name, "list") == 0) {
-               pr_info("Supported displays:\n");
-
-               for (i = 0; i < ARRAY_SIZE(displays); i++)
-                       pr_info("%s\n", displays[i].name);
-               return -ECANCELED;
-       }
-
-       if (custom) {
-               i = ARRAY_SIZE(displays) - 1;
-               displays[i].name = name;
-               if (speed == 0) {
-                       displays[i].pdev->name = name;
-                       displays[i].spi = NULL;
-               } else {
-                       size_t len;
-
-                       len = strlcpy(displays[i].spi->modalias, name,
-                                     SPI_NAME_SIZE);
-                       if (len >= SPI_NAME_SIZE)
-                               pr_warn("modalias (name) truncated to: %s\n",
-                                       displays[i].spi->modalias);
-                       displays[i].pdev = NULL;
-               }
-       }
-
-       for (i = 0; i < ARRAY_SIZE(displays); i++) {
-               if (strncmp(name, displays[i].name, SPI_NAME_SIZE) == 0) {
-                       if (displays[i].spi) {
-                               spi = displays[i].spi;
-                               spi->chip_select = cs;
-                               spi->bus_num = busnum;
-                               if (speed)
-                                       spi->max_speed_hz = speed;
-                               if (mode != -1)
-                                       spi->mode = mode;
-                               pdata = (void *)spi->platform_data;
-                       } else if (displays[i].pdev) {
-                               p_device = displays[i].pdev;
-                               pdata = p_device->dev.platform_data;
-                       } else {
-                               pr_err("broken displays array\n");
-                               return -EINVAL;
-                       }
-
-                       pdata->rotate = rotate;
-                       if (bgr == 0)
-                               pdata->bgr = false;
-                       else if (bgr == 1)
-                               pdata->bgr = true;
-                       if (startbyte)
-                               pdata->startbyte = startbyte;
-                       if (gamma)
-                               pdata->gamma = gamma;
-                       pdata->display.debug = debug;
-                       if (fps)
-                               pdata->fps = fps;
-                       if (txbuflen)
-                               pdata->txbuflen = txbuflen;
-                       if (init_num)
-                               pdata->display.init_sequence = init;
-                       if (custom) {
-                               pdata->display.width = width;
-                               pdata->display.height = height;
-                               pdata->display.buswidth = buswidth;
-                               pdata->display.backlight = 1;
-                       }
-
-                       if (displays[i].spi) {
-                               ret = fbtft_device_spi_device_register(spi);
-                               if (ret) {
-                                       pr_err("failed to register SPI device\n");
-                                       return ret;
-                               }
-                       } else {
-                               ret = platform_device_register(p_device);
-                               if (ret < 0) {
-                                       pr_err("platform_device_register() returned %d\n",
-                                              ret);
-                                       return ret;
-                               }
-                       }
-                       found = true;
-                       break;
-               }
-       }
-
-       if (!found) {
-               pr_err("display not supported: '%s'\n", name);
-               return -EINVAL;
-       }
-
-       if (spi_device && (verbose > 1))
-               pr_spi_devices();
-       if (p_device && (verbose > 1))
-               pr_p_devices();
-
-       return 0;
-}
-
-static void __exit fbtft_device_exit(void)
-{
-       if (spi_device) {
-               device_del(&spi_device->dev);
-               kfree(spi_device);
-       }
-
-       if (p_device)
-               platform_device_unregister(p_device);
-}
-
-arch_initcall(fbtft_device_init);
-module_exit(fbtft_device_exit);
-
-MODULE_DESCRIPTION("Add a FBTFT device.");
-MODULE_AUTHOR("Noralf Tronnes");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/fbtft/flexfb.c b/drivers/staging/fbtft/flexfb.c
deleted file mode 100644 (file)
index 3747321..0000000
+++ /dev/null
@@ -1,851 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * Generic FB driver for TFT LCD displays
- *
- * Copyright (C) 2013 Noralf Tronnes
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/vmalloc.h>
-#include <linux/gpio/consumer.h>
-#include <linux/spi/spi.h>
-#include <linux/delay.h>
-
-#include "fbtft.h"
-
-#define DRVNAME            "flexfb"
-
-static char *chip;
-module_param(chip, charp, 0000);
-MODULE_PARM_DESC(chip, "LCD controller");
-
-static unsigned int width;
-module_param(width, uint, 0000);
-MODULE_PARM_DESC(width, "Display width");
-
-static unsigned int height;
-module_param(height, uint, 0000);
-MODULE_PARM_DESC(height, "Display height");
-
-static s16 init[512];
-static int init_num;
-module_param_array(init, short, &init_num, 0000);
-MODULE_PARM_DESC(init, "Init sequence");
-
-static unsigned int setaddrwin;
-module_param(setaddrwin, uint, 0000);
-MODULE_PARM_DESC(setaddrwin, "Which set_addr_win() implementation to use");
-
-static unsigned int buswidth = 8;
-module_param(buswidth, uint, 0000);
-MODULE_PARM_DESC(buswidth, "Width of databus (default: 8)");
-
-static unsigned int regwidth = 8;
-module_param(regwidth, uint, 0000);
-MODULE_PARM_DESC(regwidth, "Width of controller register (default: 8)");
-
-static bool nobacklight;
-module_param(nobacklight, bool, 0000);
-MODULE_PARM_DESC(nobacklight, "Turn off backlight functionality.");
-
-static bool latched;
-module_param(latched, bool, 0000);
-MODULE_PARM_DESC(latched, "Use with latched 16-bit databus");
-
-static const s16 *initp;
-static int initp_num;
-
-/* default init sequences */
-static const s16 st7735r_init[] = {
-       -1, 0x01,
-       -2, 150,
-       -1, 0x11,
-       -2, 500,
-       -1, 0xB1, 0x01, 0x2C, 0x2D,
-       -1, 0xB2, 0x01, 0x2C, 0x2D,
-       -1, 0xB3, 0x01, 0x2C, 0x2D, 0x01, 0x2C, 0x2D,
-       -1, 0xB4, 0x07,
-       -1, 0xC0, 0xA2, 0x02, 0x84,
-       -1, 0xC1, 0xC5,
-       -1, 0xC2, 0x0A, 0x00,
-       -1, 0xC3, 0x8A, 0x2A,
-       -1, 0xC4, 0x8A, 0xEE,
-       -1, 0xC5, 0x0E,
-       -1, 0x20,
-       -1, 0x36, 0xC0,
-       -1, 0x3A, 0x05,
-       -1, 0xE0, 0x0f, 0x1a, 0x0f, 0x18, 0x2f, 0x28, 0x20, 0x22,
-           0x1f, 0x1b, 0x23, 0x37, 0x00, 0x07, 0x02, 0x10,
-       -1, 0xE1, 0x0f, 0x1b, 0x0f, 0x17, 0x33, 0x2c, 0x29, 0x2e,
-           0x30, 0x30, 0x39, 0x3f, 0x00, 0x07, 0x03, 0x10,
-       -1, 0x29,
-       -2, 100,
-       -1, 0x13,
-       -2, 10,
-       -3
-};
-
-static const s16 ssd1289_init[] = {
-       -1, 0x00, 0x0001,
-       -1, 0x03, 0xA8A4,
-       -1, 0x0C, 0x0000,
-       -1, 0x0D, 0x080C,
-       -1, 0x0E, 0x2B00,
-       -1, 0x1E, 0x00B7,
-       -1, 0x01, 0x2B3F,
-       -1, 0x02, 0x0600,
-       -1, 0x10, 0x0000,
-       -1, 0x11, 0x6070,
-       -1, 0x05, 0x0000,
-       -1, 0x06, 0x0000,
-       -1, 0x16, 0xEF1C,
-       -1, 0x17, 0x0003,
-       -1, 0x07, 0x0233,
-       -1, 0x0B, 0x0000,
-       -1, 0x0F, 0x0000,
-       -1, 0x41, 0x0000,
-       -1, 0x42, 0x0000,
-       -1, 0x48, 0x0000,
-       -1, 0x49, 0x013F,
-       -1, 0x4A, 0x0000,
-       -1, 0x4B, 0x0000,
-       -1, 0x44, 0xEF00,
-       -1, 0x45, 0x0000,
-       -1, 0x46, 0x013F,
-       -1, 0x30, 0x0707,
-       -1, 0x31, 0x0204,
-       -1, 0x32, 0x0204,
-       -1, 0x33, 0x0502,
-       -1, 0x34, 0x0507,
-       -1, 0x35, 0x0204,
-       -1, 0x36, 0x0204,
-       -1, 0x37, 0x0502,
-       -1, 0x3A, 0x0302,
-       -1, 0x3B, 0x0302,
-       -1, 0x23, 0x0000,
-       -1, 0x24, 0x0000,
-       -1, 0x25, 0x8000,
-       -1, 0x4f, 0x0000,
-       -1, 0x4e, 0x0000,
-       -1, 0x22,
-       -3
-};
-
-static const s16 hx8340bn_init[] = {
-       -1, 0xC1, 0xFF, 0x83, 0x40,
-       -1, 0x11,
-       -2, 150,
-       -1, 0xCA, 0x70, 0x00, 0xD9,
-       -1, 0xB0, 0x01, 0x11,
-       -1, 0xC9, 0x90, 0x49, 0x10, 0x28, 0x28, 0x10, 0x00, 0x06,
-       -2, 20,
-       -1, 0xC2, 0x60, 0x71, 0x01, 0x0E, 0x05, 0x02, 0x09, 0x31, 0x0A,
-       -1, 0xC3, 0x67, 0x30, 0x61, 0x17, 0x48, 0x07, 0x05, 0x33,
-       -2, 10,
-       -1, 0xB5, 0x35, 0x20, 0x45,
-       -1, 0xB4, 0x33, 0x25, 0x4C,
-       -2, 10,
-       -1, 0x3A, 0x05,
-       -1, 0x29,
-       -2, 10,
-       -3
-};
-
-static const s16 ili9225_init[] = {
-       -1, 0x0001, 0x011C,
-       -1, 0x0002, 0x0100,
-       -1, 0x0003, 0x1030,
-       -1, 0x0008, 0x0808,
-       -1, 0x000C, 0x0000,
-       -1, 0x000F, 0x0A01,
-       -1, 0x0020, 0x0000,
-       -1, 0x0021, 0x0000,
-       -2, 50,
-       -1, 0x0010, 0x0A00,
-       -1, 0x0011, 0x1038,
-       -2, 50,
-       -1, 0x0012, 0x1121,
-       -1, 0x0013, 0x004E,
-       -1, 0x0014, 0x676F,
-       -1, 0x0030, 0x0000,
-       -1, 0x0031, 0x00DB,
-       -1, 0x0032, 0x0000,
-       -1, 0x0033, 0x0000,
-       -1, 0x0034, 0x00DB,
-       -1, 0x0035, 0x0000,
-       -1, 0x0036, 0x00AF,
-       -1, 0x0037, 0x0000,
-       -1, 0x0038, 0x00DB,
-       -1, 0x0039, 0x0000,
-       -1, 0x0050, 0x0000,
-       -1, 0x0051, 0x060A,
-       -1, 0x0052, 0x0D0A,
-       -1, 0x0053, 0x0303,
-       -1, 0x0054, 0x0A0D,
-       -1, 0x0055, 0x0A06,
-       -1, 0x0056, 0x0000,
-       -1, 0x0057, 0x0303,
-       -1, 0x0058, 0x0000,
-       -1, 0x0059, 0x0000,
-       -2, 50,
-       -1, 0x0007, 0x1017,
-       -2, 50,
-       -3
-};
-
-static const s16 ili9320_init[] = {
-       -1, 0x00E5, 0x8000,
-       -1, 0x0000, 0x0001,
-       -1, 0x0001, 0x0100,
-       -1, 0x0002, 0x0700,
-       -1, 0x0003, 0x1030,
-       -1, 0x0004, 0x0000,
-       -1, 0x0008, 0x0202,
-       -1, 0x0009, 0x0000,
-       -1, 0x000A, 0x0000,
-       -1, 0x000C, 0x0000,
-       -1, 0x000D, 0x0000,
-       -1, 0x000F, 0x0000,
-       -1, 0x0010, 0x0000,
-       -1, 0x0011, 0x0007,
-       -1, 0x0012, 0x0000,
-       -1, 0x0013, 0x0000,
-       -2, 200,
-       -1, 0x0010, 0x17B0,
-       -1, 0x0011, 0x0031,
-       -2, 50,
-       -1, 0x0012, 0x0138,
-       -2, 50,
-       -1, 0x0013, 0x1800,
-       -1, 0x0029, 0x0008,
-       -2, 50,
-       -1, 0x0020, 0x0000,
-       -1, 0x0021, 0x0000,
-       -1, 0x0030, 0x0000,
-       -1, 0x0031, 0x0505,
-       -1, 0x0032, 0x0004,
-       -1, 0x0035, 0x0006,
-       -1, 0x0036, 0x0707,
-       -1, 0x0037, 0x0105,
-       -1, 0x0038, 0x0002,
-       -1, 0x0039, 0x0707,
-       -1, 0x003C, 0x0704,
-       -1, 0x003D, 0x0807,
-       -1, 0x0050, 0x0000,
-       -1, 0x0051, 0x00EF,
-       -1, 0x0052, 0x0000,
-       -1, 0x0053, 0x013F,
-       -1, 0x0060, 0x2700,
-       -1, 0x0061, 0x0001,
-       -1, 0x006A, 0x0000,
-       -1, 0x0080, 0x0000,
-       -1, 0x0081, 0x0000,
-       -1, 0x0082, 0x0000,
-       -1, 0x0083, 0x0000,
-       -1, 0x0084, 0x0000,
-       -1, 0x0085, 0x0000,
-       -1, 0x0090, 0x0010,
-       -1, 0x0092, 0x0000,
-       -1, 0x0093, 0x0003,
-       -1, 0x0095, 0x0110,
-       -1, 0x0097, 0x0000,
-       -1, 0x0098, 0x0000,
-       -1, 0x0007, 0x0173,
-       -3
-};
-
-static const s16 ili9325_init[] = {
-       -1, 0x00E3, 0x3008,
-       -1, 0x00E7, 0x0012,
-       -1, 0x00EF, 0x1231,
-       -1, 0x0001, 0x0100,
-       -1, 0x0002, 0x0700,
-       -1, 0x0003, 0x1030,
-       -1, 0x0004, 0x0000,
-       -1, 0x0008, 0x0207,
-       -1, 0x0009, 0x0000,
-       -1, 0x000A, 0x0000,
-       -1, 0x000C, 0x0000,
-       -1, 0x000D, 0x0000,
-       -1, 0x000F, 0x0000,
-       -1, 0x0010, 0x0000,
-       -1, 0x0011, 0x0007,
-       -1, 0x0012, 0x0000,
-       -1, 0x0013, 0x0000,
-       -2, 200,
-       -1, 0x0010, 0x1690,
-       -1, 0x0011, 0x0223,
-       -2, 50,
-       -1, 0x0012, 0x000D,
-       -2, 50,
-       -1, 0x0013, 0x1200,
-       -1, 0x0029, 0x000A,
-       -1, 0x002B, 0x000C,
-       -2, 50,
-       -1, 0x0020, 0x0000,
-       -1, 0x0021, 0x0000,
-       -1, 0x0030, 0x0000,
-       -1, 0x0031, 0x0506,
-       -1, 0x0032, 0x0104,
-       -1, 0x0035, 0x0207,
-       -1, 0x0036, 0x000F,
-       -1, 0x0037, 0x0306,
-       -1, 0x0038, 0x0102,
-       -1, 0x0039, 0x0707,
-       -1, 0x003C, 0x0702,
-       -1, 0x003D, 0x1604,
-       -1, 0x0050, 0x0000,
-       -1, 0x0051, 0x00EF,
-       -1, 0x0052, 0x0000,
-       -1, 0x0053, 0x013F,
-       -1, 0x0060, 0xA700,
-       -1, 0x0061, 0x0001,
-       -1, 0x006A, 0x0000,
-       -1, 0x0080, 0x0000,
-       -1, 0x0081, 0x0000,
-       -1, 0x0082, 0x0000,
-       -1, 0x0083, 0x0000,
-       -1, 0x0084, 0x0000,
-       -1, 0x0085, 0x0000,
-       -1, 0x0090, 0x0010,
-       -1, 0x0092, 0x0600,
-       -1, 0x0007, 0x0133,
-       -3
-};
-
-static const s16 ili9341_init[] = {
-       -1, 0x28,
-       -2, 20,
-       -1, 0xCF, 0x00, 0x83, 0x30,
-       -1, 0xED, 0x64, 0x03, 0x12, 0x81,
-       -1, 0xE8, 0x85, 0x01, 0x79,
-       -1, 0xCB, 0x39, 0x2c, 0x00, 0x34, 0x02,
-       -1, 0xF7, 0x20,
-       -1, 0xEA, 0x00, 0x00,
-       -1, 0xC0, 0x26,
-       -1, 0xC1, 0x11,
-       -1, 0xC5, 0x35, 0x3E,
-       -1, 0xC7, 0xBE,
-       -1, 0xB1, 0x00, 0x1B,
-       -1, 0xB6, 0x0a, 0x82, 0x27, 0x00,
-       -1, 0xB7, 0x07,
-       -1, 0x3A, 0x55,
-       -1, 0x36, 0x48,
-       -1, 0x11,
-       -2, 120,
-       -1, 0x29,
-       -2, 20,
-       -3
-};
-
-static const s16 ssd1351_init[] = {
-       -1, 0xfd, 0x12,
-       -1, 0xfd, 0xb1,
-       -1, 0xae,
-       -1, 0xb3, 0xf1,
-       -1, 0xca, 0x7f,
-       -1, 0xa0, 0x74,
-       -1, 0x15, 0x00, 0x7f,
-       -1, 0x75, 0x00, 0x7f,
-       -1, 0xa1, 0x00,
-       -1, 0xa2, 0x00,
-       -1, 0xb5, 0x00,
-       -1, 0xab, 0x01,
-       -1, 0xb1, 0x32,
-       -1, 0xb4, 0xa0, 0xb5, 0x55,
-       -1, 0xbb, 0x17,
-       -1, 0xbe, 0x05,
-       -1, 0xc1, 0xc8, 0x80, 0xc8,
-       -1, 0xc7, 0x0f,
-       -1, 0xb6, 0x01,
-       -1, 0xa6,
-       -1, 0xaf,
-       -3
-};
-
-/**
- * struct flexfb_lcd_controller - Describes the LCD controller properties
- * @name: Model name of the chip
- * @width: Width of display in pixels
- * @height: Height of display in pixels
- * @setaddrwin: Which set_addr_win() implementation to use
- * @regwidth: LCD Controller Register width in bits
- * @init_seq: LCD initialization sequence
- * @init_seq_sz: Size of LCD initialization sequence
- */
-struct flexfb_lcd_controller {
-       const char *name;
-       unsigned int width;
-       unsigned int height;
-       unsigned int setaddrwin;
-       unsigned int regwidth;
-       const s16 *init_seq;
-       int init_seq_sz;
-};
-
-static const struct flexfb_lcd_controller flexfb_chip_table[] = {
-       {
-               .name = "st7735r",
-               .width = 120,
-               .height = 160,
-               .init_seq = st7735r_init,
-               .init_seq_sz = ARRAY_SIZE(st7735r_init),
-       },
-       {
-               .name = "hx8340bn",
-               .width = 176,
-               .height = 220,
-               .init_seq = hx8340bn_init,
-               .init_seq_sz = ARRAY_SIZE(hx8340bn_init),
-       },
-       {
-               .name = "ili9225",
-               .width = 176,
-               .height = 220,
-               .regwidth = 16,
-               .init_seq = ili9225_init,
-               .init_seq_sz = ARRAY_SIZE(ili9225_init),
-       },
-       {
-               .name = "ili9320",
-               .width = 240,
-               .height = 320,
-               .setaddrwin = 1,
-               .regwidth = 16,
-               .init_seq = ili9320_init,
-               .init_seq_sz = ARRAY_SIZE(ili9320_init),
-       },
-       {
-               .name = "ili9325",
-               .width = 240,
-               .height = 320,
-               .setaddrwin = 1,
-               .regwidth = 16,
-               .init_seq = ili9325_init,
-               .init_seq_sz = ARRAY_SIZE(ili9325_init),
-       },
-       {
-               .name = "ili9341",
-               .width = 240,
-               .height = 320,
-               .init_seq = ili9341_init,
-               .init_seq_sz = ARRAY_SIZE(ili9341_init),
-       },
-       {
-               .name = "ssd1289",
-               .width = 240,
-               .height = 320,
-               .setaddrwin = 2,
-               .regwidth = 16,
-               .init_seq = ssd1289_init,
-               .init_seq_sz = ARRAY_SIZE(ssd1289_init),
-       },
-       {
-               .name = "ssd1351",
-               .width = 128,
-               .height = 128,
-               .setaddrwin = 3,
-               .init_seq = ssd1351_init,
-               .init_seq_sz = ARRAY_SIZE(ssd1351_init),
-       },
-};
-
-/* ili9320, ili9325 */
-static void flexfb_set_addr_win_1(struct fbtft_par *par,
-                                 int xs, int ys, int xe, int ye)
-{
-       switch (par->info->var.rotate) {
-       /* R20h = Horizontal GRAM Start Address */
-       /* R21h = Vertical GRAM Start Address */
-       case 0:
-               write_reg(par, 0x0020, xs);
-               write_reg(par, 0x0021, ys);
-               break;
-       case 180:
-               write_reg(par, 0x0020, width - 1 - xs);
-               write_reg(par, 0x0021, height - 1 - ys);
-               break;
-       case 270:
-               write_reg(par, 0x0020, width - 1 - ys);
-               write_reg(par, 0x0021, xs);
-               break;
-       case 90:
-               write_reg(par, 0x0020, ys);
-               write_reg(par, 0x0021, height - 1 - xs);
-               break;
-       }
-       write_reg(par, 0x0022); /* Write Data to GRAM */
-}
-
-/* ssd1289 */
-static void flexfb_set_addr_win_2(struct fbtft_par *par,
-                                 int xs, int ys, int xe, int ye)
-{
-       switch (par->info->var.rotate) {
-       /* R4Eh - Set GDDRAM X address counter */
-       /* R4Fh - Set GDDRAM Y address counter */
-       case 0:
-               write_reg(par, 0x4e, xs);
-               write_reg(par, 0x4f, ys);
-               break;
-       case 180:
-               write_reg(par, 0x4e, par->info->var.xres - 1 - xs);
-               write_reg(par, 0x4f, par->info->var.yres - 1 - ys);
-               break;
-       case 270:
-               write_reg(par, 0x4e, par->info->var.yres - 1 - ys);
-               write_reg(par, 0x4f, xs);
-               break;
-       case 90:
-               write_reg(par, 0x4e, ys);
-               write_reg(par, 0x4f, par->info->var.xres - 1 - xs);
-               break;
-       }
-
-       /* R22h - RAM data write */
-       write_reg(par, 0x22, 0);
-}
-
-/* ssd1351 */
-static void set_addr_win_3(struct fbtft_par *par,
-                          int xs, int ys, int xe, int ye)
-{
-       write_reg(par, 0x15, xs, xe);
-       write_reg(par, 0x75, ys, ye);
-       write_reg(par, 0x5C);
-}
-
-static int flexfb_verify_gpios_dc(struct fbtft_par *par)
-{
-       fbtft_par_dbg(DEBUG_VERIFY_GPIOS, par, "%s()\n", __func__);
-
-       if (!par->gpio.dc) {
-               dev_err(par->info->device,
-                       "Missing info about 'dc' gpio. Aborting.\n");
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-static int flexfb_verify_gpios_db(struct fbtft_par *par)
-{
-       int i;
-       int num_db = buswidth;
-
-       fbtft_par_dbg(DEBUG_VERIFY_GPIOS, par, "%s()\n", __func__);
-
-       if (!par->gpio.dc) {
-               dev_err(par->info->device, "Missing info about 'dc' gpio. Aborting.\n");
-               return -EINVAL;
-       }
-       if (!par->gpio.wr) {
-               dev_err(par->info->device, "Missing info about 'wr' gpio. Aborting.\n");
-               return -EINVAL;
-       }
-       if (latched && !par->gpio.latch) {
-               dev_err(par->info->device, "Missing info about 'latch' gpio. Aborting.\n");
-               return -EINVAL;
-       }
-       if (latched)
-               num_db = buswidth / 2;
-       for (i = 0; i < num_db; i++) {
-               if (!par->gpio.db[i]) {
-                       dev_err(par->info->device,
-                               "Missing info about 'db%02d' gpio. Aborting.\n",
-                               i);
-                       return -EINVAL;
-               }
-       }
-
-       return 0;
-}
-
-static void flexfb_chip_load_param(const struct flexfb_lcd_controller *chip)
-{
-       if (!width)
-               width = chip->width;
-       if (!height)
-               height = chip->height;
-       setaddrwin = chip->setaddrwin;
-       if (chip->regwidth)
-               regwidth = chip->regwidth;
-       if (!init_num) {
-               initp = chip->init_seq;
-               initp_num = chip->init_seq_sz;
-       }
-}
-
-static struct fbtft_display flex_display = { };
-
-static int flexfb_chip_init(const struct device *dev)
-{
-       int i;
-
-       for (i = 0; i < ARRAY_SIZE(flexfb_chip_table); i++)
-               if (!strcmp(chip, flexfb_chip_table[i].name)) {
-                       flexfb_chip_load_param(&flexfb_chip_table[i]);
-                       return 0;
-               }
-
-       dev_err(dev, "chip=%s is not supported\n", chip);
-
-       return -EINVAL;
-}
-
-static int flexfb_probe_common(struct spi_device *sdev,
-                              struct platform_device *pdev)
-{
-       struct device *dev;
-       struct fb_info *info;
-       struct fbtft_par *par;
-       int ret;
-
-       initp = init;
-       initp_num = init_num;
-
-       if (sdev)
-               dev = &sdev->dev;
-       else
-               dev = &pdev->dev;
-
-       fbtft_init_dbg(dev, "%s(%s)\n", __func__,
-                      sdev ? "'SPI device'" : "'Platform device'");
-
-       if (chip) {
-               ret = flexfb_chip_init(dev);
-               if (ret)
-                       return ret;
-       }
-
-       if (width == 0 || height == 0) {
-               dev_err(dev, "argument(s) missing: width and height has to be set.\n");
-               return -EINVAL;
-       }
-       flex_display.width = width;
-       flex_display.height = height;
-       fbtft_init_dbg(dev, "Display resolution: %dx%d\n", width, height);
-       fbtft_init_dbg(dev, "chip = %s\n", chip ? chip : "not set");
-       fbtft_init_dbg(dev, "setaddrwin = %d\n", setaddrwin);
-       fbtft_init_dbg(dev, "regwidth = %d\n", regwidth);
-       fbtft_init_dbg(dev, "buswidth = %d\n", buswidth);
-
-       info = fbtft_framebuffer_alloc(&flex_display, dev, dev->platform_data);
-       if (!info)
-               return -ENOMEM;
-
-       par = info->par;
-       if (sdev)
-               par->spi = sdev;
-       else
-               par->pdev = pdev;
-       if (!par->init_sequence)
-               par->init_sequence = initp;
-       par->fbtftops.init_display = fbtft_init_display;
-
-       /* registerwrite functions */
-       switch (regwidth) {
-       case 8:
-               par->fbtftops.write_register = fbtft_write_reg8_bus8;
-               break;
-       case 16:
-               par->fbtftops.write_register = fbtft_write_reg16_bus8;
-               break;
-       default:
-               dev_err(dev,
-                       "argument 'regwidth': %d is not supported.\n",
-                       regwidth);
-               return -EINVAL;
-       }
-
-       /* bus functions */
-       if (sdev) {
-               par->fbtftops.write = fbtft_write_spi;
-               switch (buswidth) {
-               case 8:
-                       par->fbtftops.write_vmem = fbtft_write_vmem16_bus8;
-                       if (!par->startbyte)
-                               par->fbtftops.verify_gpios = flexfb_verify_gpios_dc;
-                       break;
-               case 9:
-                       if (regwidth == 16) {
-                               dev_err(dev, "argument 'regwidth': %d is not supported with buswidth=%d and SPI.\n",
-                                       regwidth, buswidth);
-                               return -EINVAL;
-                       }
-                       par->fbtftops.write_register = fbtft_write_reg8_bus9;
-                       par->fbtftops.write_vmem = fbtft_write_vmem16_bus9;
-                       if (par->spi->master->bits_per_word_mask
-                           & SPI_BPW_MASK(9)) {
-                               par->spi->bits_per_word = 9;
-                               break;
-                       }
-
-                       dev_warn(dev,
-                                "9-bit SPI not available, emulating using 8-bit.\n");
-                       /* allocate buffer with room for dc bits */
-                       par->extra = devm_kzalloc(par->info->device,
-                                                 par->txbuf.len
-                                                 + (par->txbuf.len / 8) + 8,
-                                                 GFP_KERNEL);
-                       if (!par->extra) {
-                               ret = -ENOMEM;
-                               goto out_release;
-                       }
-                       par->fbtftops.write = fbtft_write_spi_emulate_9;
-
-                       break;
-               default:
-                       dev_err(dev,
-                               "argument 'buswidth': %d is not supported with SPI.\n",
-                               buswidth);
-                       return -EINVAL;
-               }
-       } else {
-               par->fbtftops.verify_gpios = flexfb_verify_gpios_db;
-               switch (buswidth) {
-               case 8:
-                       par->fbtftops.write = fbtft_write_gpio8_wr;
-                       par->fbtftops.write_vmem = fbtft_write_vmem16_bus8;
-                       break;
-               case 16:
-                       par->fbtftops.write_register = fbtft_write_reg16_bus16;
-                       if (latched)
-                               par->fbtftops.write = fbtft_write_gpio16_wr_latched;
-                       else
-                               par->fbtftops.write = fbtft_write_gpio16_wr;
-                       par->fbtftops.write_vmem = fbtft_write_vmem16_bus16;
-                       break;
-               default:
-                       dev_err(dev,
-                               "argument 'buswidth': %d is not supported with parallel.\n",
-                               buswidth);
-                       return -EINVAL;
-               }
-       }
-
-       /* set_addr_win function */
-       switch (setaddrwin) {
-       case 0:
-               /* use default */
-               break;
-       case 1:
-               par->fbtftops.set_addr_win = flexfb_set_addr_win_1;
-               break;
-       case 2:
-               par->fbtftops.set_addr_win = flexfb_set_addr_win_2;
-               break;
-       case 3:
-               par->fbtftops.set_addr_win = set_addr_win_3;
-               break;
-       default:
-               dev_err(dev, "argument 'setaddrwin': unknown value %d.\n",
-                       setaddrwin);
-               return -EINVAL;
-       }
-
-       if (!nobacklight)
-               par->fbtftops.register_backlight = fbtft_register_backlight;
-
-       ret = fbtft_register_framebuffer(info);
-       if (ret < 0)
-               goto out_release;
-
-       return 0;
-
-out_release:
-       fbtft_framebuffer_release(info);
-
-       return ret;
-}
-
-static int flexfb_remove_common(struct device *dev, struct fb_info *info)
-{
-       struct fbtft_par *par;
-
-       if (!info)
-               return -EINVAL;
-       par = info->par;
-       if (par)
-               fbtft_par_dbg(DEBUG_DRIVER_INIT_FUNCTIONS, par, "%s()\n",
-                             __func__);
-       fbtft_unregister_framebuffer(info);
-       fbtft_framebuffer_release(info);
-
-       return 0;
-}
-
-static int flexfb_probe_spi(struct spi_device *spi)
-{
-       return flexfb_probe_common(spi, NULL);
-}
-
-static int flexfb_remove_spi(struct spi_device *spi)
-{
-       struct fb_info *info = spi_get_drvdata(spi);
-
-       return flexfb_remove_common(&spi->dev, info);
-}
-
-static int flexfb_probe_pdev(struct platform_device *pdev)
-{
-       return flexfb_probe_common(NULL, pdev);
-}
-
-static int flexfb_remove_pdev(struct platform_device *pdev)
-{
-       struct fb_info *info = platform_get_drvdata(pdev);
-
-       return flexfb_remove_common(&pdev->dev, info);
-}
-
-static struct spi_driver flexfb_spi_driver = {
-       .driver = {
-               .name   = DRVNAME,
-       },
-       .probe  = flexfb_probe_spi,
-       .remove = flexfb_remove_spi,
-};
-
-static const struct platform_device_id flexfb_platform_ids[] = {
-       { "flexpfb", 0 },
-       { },
-};
-MODULE_DEVICE_TABLE(platform, flexfb_platform_ids);
-
-static struct platform_driver flexfb_platform_driver = {
-       .driver = {
-               .name   = DRVNAME,
-       },
-       .id_table = flexfb_platform_ids,
-       .probe  = flexfb_probe_pdev,
-       .remove = flexfb_remove_pdev,
-};
-
-static int __init flexfb_init(void)
-{
-       int ret, ret2;
-
-       ret = spi_register_driver(&flexfb_spi_driver);
-       ret2 = platform_driver_register(&flexfb_platform_driver);
-       if (ret < 0)
-               return ret;
-       return ret2;
-}
-
-static void __exit flexfb_exit(void)
-{
-       spi_unregister_driver(&flexfb_spi_driver);
-       platform_driver_unregister(&flexfb_platform_driver);
-}
-
-/* ------------------------------------------------------------------------- */
-
-module_init(flexfb_init);
-module_exit(flexfb_exit);
-
-MODULE_DESCRIPTION("Generic FB driver for TFT LCD displays");
-MODULE_AUTHOR("Noralf Tronnes");
-MODULE_LICENSE("GPL");
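The init tables removed above (st7735r_init, ili9341_init, and the rest) use fbtft's sentinel encoding: -1 introduces a command and its parameter bytes, -2 is a delay in milliseconds, and -3 terminates the table; the fbtft core's fbtft_init_display() walks init_sequence arrays this way. A minimal userspace sketch of that walk, for illustration only (walk_init_seq and the demo table are made-up names, not part of the driver):

    #include <stdint.h>
    #include <stdio.h>

    /* Walk an fbtft-style init sequence: -1 = command, -2 = delay in ms, -3 = end. */
    static void walk_init_seq(const int16_t *seq, int len)
    {
        int i = 0;

        while (i < len && seq[i] != -3) {
            if (seq[i] == -1) {
                int start = ++i;

                while (i < len && seq[i] >= 0)
                    i++;
                printf("cmd %#x with %d parameter(s)\n",
                       (unsigned int)seq[start], i - start - 1);
            } else if (seq[i] == -2) {
                printf("delay %d ms\n", seq[++i]);
                i++;
            } else {
                break; /* malformed entry */
            }
        }
    }

    int main(void)
    {
        static const int16_t demo[] = { -1, 0x11, -2, 150, -1, 0x29, -3 };

        walk_init_seq(demo, (int)(sizeof(demo) / sizeof(demo[0])));
        return 0;
    }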
index c64728fc21f229d867a60d175870854a11c5dfee..83469061a542648aafc5b150969c55ab222261de 100644 (file)
@@ -261,11 +261,11 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
        /* Build the PKO buffer pointer */
        hw_buffer.u64 = 0;
        if (skb_shinfo(skb)->nr_frags == 0) {
-               hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)skb->data);
+               hw_buffer.s.addr = XKPHYS_TO_PHYS((uintptr_t)skb->data);
                hw_buffer.s.pool = 0;
                hw_buffer.s.size = skb->len;
        } else {
-               hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)skb->data);
+               hw_buffer.s.addr = XKPHYS_TO_PHYS((uintptr_t)skb->data);
                hw_buffer.s.pool = 0;
                hw_buffer.s.size = skb_headlen(skb);
                CVM_OCT_SKB_CB(skb)[0] = hw_buffer.u64;
@@ -273,11 +273,12 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
                        skb_frag_t *fs = skb_shinfo(skb)->frags + i;
 
                        hw_buffer.s.addr =
-                               XKPHYS_TO_PHYS((u64)skb_frag_address(fs));
+                               XKPHYS_TO_PHYS((uintptr_t)skb_frag_address(fs));
                        hw_buffer.s.size = skb_frag_size(fs);
                        CVM_OCT_SKB_CB(skb)[i + 1] = hw_buffer.u64;
                }
-               hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)CVM_OCT_SKB_CB(skb));
+               hw_buffer.s.addr =
+                       XKPHYS_TO_PHYS((uintptr_t)CVM_OCT_SKB_CB(skb));
                hw_buffer.s.size = skb_shinfo(skb)->nr_frags + 1;
                pko_command.s.segs = skb_shinfo(skb)->nr_frags + 1;
                pko_command.s.gather = 1;
@@ -349,10 +350,8 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
         */
        dst_release(skb_dst(skb));
        skb_dst_set(skb, NULL);
-#ifdef CONFIG_XFRM
-       secpath_reset(skb);
-#endif
-       nf_reset(skb);
+       skb_ext_reset(skb);
+       nf_reset_ct(skb);
 
 #ifdef CONFIG_NET_SCHED
        skb->tc_index = 0;
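The (u64) to (uintptr_t) changes above, like the integer-to-pointer cast in cvmx_phys_to_ptr() just below, follow the portable cast idiom: convert between pointers and integers through uintptr_t, which is defined to be wide enough for a pointer, and widen or narrow from there, so the compiler stops warning about casts whose widths do not match. A standalone illustration of the pattern (ptr_to_phys_addr is a hypothetical helper, not the Octeon macro):

    #include <stdint.h>

    /* Convert a pointer to a 64-bit integer address without a size-mismatch warning. */
    static inline uint64_t ptr_to_phys_addr(const void *p)
    {
        return (uint64_t)(uintptr_t)p;
    }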
index a4ac3bfb62a85e6536545831408778038b3373c6..b78ce9eaab85d310225e1864da4a89a71a3f2f05 100644 (file)
@@ -1202,7 +1202,7 @@ static inline int cvmx_wqe_get_grp(cvmx_wqe_t *work)
 
 static inline void *cvmx_phys_to_ptr(uint64_t physical_address)
 {
-       return (void *)(physical_address);
+       return (void *)(uintptr_t)(physical_address);
 }
 
 static inline uint64_t cvmx_ptr_to_phys(void *ptr)
index 9ddd51685063d50e4aa1eaad42d1e255ec6881fd..5792f491b59acb58fd9aafd8f292a427c11066a4 100644 (file)
@@ -409,7 +409,7 @@ static int odm_ARFBRefresh_8188E(struct odm_dm_struct *dm_odm, struct odm_ra_inf
                pRaInfo->PTModeSS = 3;
        else if (pRaInfo->HighestRate > 0x0b)
                pRaInfo->PTModeSS = 2;
-       else if (pRaInfo->HighestRate > 0x0b)
+       else if (pRaInfo->HighestRate > 0x03)
                pRaInfo->PTModeSS = 1;
        else
                pRaInfo->PTModeSS = 0;
index 664d93a7f90dff9709580437feab404d46af4dff..4fac9dca798e812d8f4315baff14193fb6fcf13f 100644 (file)
@@ -348,8 +348,10 @@ static struct adapter *rtw_usb_if1_init(struct dvobj_priv *dvobj,
        }
 
        padapter->HalData = kzalloc(sizeof(struct hal_data_8188e), GFP_KERNEL);
-       if (!padapter->HalData)
-               DBG_88E("cant not alloc memory for HAL DATA\n");
+       if (!padapter->HalData) {
+               DBG_88E("Failed to allocate memory for HAL data\n");
+               goto free_adapter;
+       }
 
        /* step read_chip_version */
        rtw_hal_read_chip_version(padapter);
diff --git a/drivers/staging/speakup/sysfs-driver-speakup b/drivers/staging/speakup/sysfs-driver-speakup
new file mode 100644 (file)
index 0000000..be3f5d6
--- /dev/null
@@ -0,0 +1,369 @@
+What:          /sys/accessibility/speakup/attrib_bleep
+KernelVersion: 2.6
+Contact:       speakup@linux-speakup.org
+Description:   Beeps the PC speaker when there is an attribute change such as
+               foreground or background color when using speakup review
+               commands. One = on, zero = off.
+
+What:          /sys/accessibility/speakup/bell_pos
+KernelVersion: 2.6
+Contact:       speakup@linux-speakup.org
+Description:   This works much like a typewriter bell. If for example 72 is
+               echoed to bell_pos, it will beep the PC speaker when typing on
+               a line past character 72.
+
+What:          /sys/accessibility/speakup/bleeps
+KernelVersion: 2.6
+Contact:       speakup@linux-speakup.org
+Description:   This controls whether one hears beeps through the PC speaker
+               when using speakup's review commands.
+               TODO: what values does it accept?
+
+What:          /sys/accessibility/speakup/bleep_time
+KernelVersion: 2.6
+Contact:       speakup@linux-speakup.org
+Description:   This controls the duration of the PC speaker beeps speakup
+               produces.
+               TODO: What are the units? Jiffies?
+
+What:          /sys/accessibility/speakup/cursor_time
+KernelVersion: 2.6
+Contact:       speakup@linux-speakup.org
+Description:   This controls the cursor delay when using the arrow keys. On a
+               very slow connection the default setting can cause speakup to
+               speak the wrong characters when moving with the arrows or
+               backspacing. Set this to a higher value to compensate for the
+               delay and keep cursor position and speech in sync.
+
+What:          /sys/accessibility/speakup/delimiters
+KernelVersion: 2.6
+Contact:       speakup@linux-speakup.org
+Description:   Characters that speakup treats as word delimiters.
+               TODO: add more info
+
+What:          /sys/accessibility/speakup/ex_num
+KernelVersion: 2.6
+Contact:       speakup@linux-speakup.org
+Description:   TODO:
+
+What:          /sys/accessibility/speakup/key_echo
+KernelVersion: 2.6
+Contact:       speakup@linux-speakup.org
+Description:   Controls if speakup speaks keys when they are typed. One = on,
+               zero = off or don't echo keys.
+
+What:          /sys/accessibility/speakup/keymap
+KernelVersion: 2.6
+Contact:       speakup@linux-speakup.org
+Description:   Speakup keymap remaps keys to Speakup functions. It uses a
+               binary format. A special program called genmap is needed to
+               compile a textual keymap into the binary format, which is then
+               loaded into /sys/accessibility/speakup/keymap.
+
+What:          /sys/accessibility/speakup/no_interrupt
+KernelVersion: 2.6
+Contact:       speakup@linux-speakup.org
+Description:   Controls whether typing interrupts output from speakup. With
+               no_interrupt set to zero, typing on the keyboard will interrupt
+               speakup if, for example, the say screen command is used before
+               the entire screen is read. With no_interrupt set to one, if the
+               say screen command is used and one then types on the keyboard,
+               speakup will continue to say the whole screen until it finishes.
+
+What:          /sys/accessibility/speakup/punc_all
+KernelVersion: 2.6
+Contact:       speakup@linux-speakup.org
+Description:   This is a list of all the punctuation speakup should speak when
+               punc_level is set to four.
+
+What:          /sys/accessibility/speakup/punc_level
+KernelVersion: 2.6
+Contact:       speakup@linux-speakup.org
+Description:   Controls the level of punctuation spoken as the screen is
+               displayed, not reviewed. Levels range from zero (no punctuation)
+               to four (all punctuation). One corresponds to punc_some, two
+               corresponds to punc_most, and three as well as four both
+               correspond to punc_all. Some hardware synthesizers may have
+               distinct levels for three and four. Also note that if punc_level
+               is set to zero and key_echo is set to one, typed punctuation is
+               still spoken as it is typed.
+
+What:          /sys/accessibility/speakup/punc_most
+KernelVersion: 2.6
+Contact:       speakup@linux-speakup.org
+Description:   This is a list of all the punctuation speakup should speak when
+               punc_level is set to two.
+
+What:          /sys/accessibility/speakup/punc_some
+KernelVersion: 2.6
+Contact:       speakup@linux-speakup.org
+Description:   This is a list of all the punctuation speakup should speak when
+               punc_level is set to one.
+
+What:          /sys/accessibility/speakup/reading_punc
+KernelVersion: 2.6
+Contact:       speakup@linux-speakup.org
+Description:   Almost the same as punc_level, the differences being that
+               reading_punc controls the level of punctuation when reviewing
+               the screen with speakup's screen review commands. The other
+               difference is that reading_punc set to three speaks punc_all,
+               and reading_punc set to four speaks all punctuation, including
+               spaces.
+
+What:          /sys/accessibility/speakup/repeats
+KernelVersion: 2.6
+Contact:       speakup@linux-speakup.org
+Description:   A list of characters speakup repeats. Normally, when there are
+               more than three characters in a row, speakup
+               just reads three of
+               those characters. For example, "......" would be read as dot,
+               dot, dot. If a . is added to the list of characters in repeats,
+               "......" would be read as dot, dot, dot, times six.
+
+What:          /sys/accessibility/speakup/say_control
+KernelVersion: 2.6
+Contact:       speakup@linux-speakup.org
+Description:   If set to one, speakup speaks shift, alt and control when those
+               keys are pressed. If say_control is set to zero, shift, ctrl,
+               and alt are not spoken when they are pressed.
+
+What:          /sys/accessibility/speakup/say_word_ctl
+KernelVersion: 2.6
+Contact:       speakup@linux-speakup.org
+Description:   TODO:
+
+What:          /sys/accessibility/speakup/silent
+KernelVersion: 2.6
+Contact:       speakup@linux-speakup.org
+Description:   TODO:
+
+What:          /sys/accessibility/speakup/spell_delay
+KernelVersion: 2.6
+Contact:       speakup@linux-speakup.org
+Description:   This controls how fast a word is spelled when speakup's say
+               word review command is pressed twice quickly to speak the
+               current word being reviewed. Zero just speaks the letters one
+               after another, while values one through four seem to introduce
+               more of a pause between the spelling of each letter by speakup.
+
+What:          /sys/accessibility/speakup/synth
+KernelVersion: 2.6
+Contact:       speakup@linux-speakup.org
+Description:   Gets or sets the synthesizer driver currently in use. Reading
+               synth returns the synthesizer driver currently in use. Writing
+               synth switches to the given synthesizer driver, provided it is
+               either built into the kernel, or already loaded as a module.
+
+What:          /sys/accessibility/speakup/synth_direct
+KernelVersion: 2.6
+Contact:       speakup@linux-speakup.org
+Description:   Sends whatever is written to synth_direct directly to the
+               speech synthesizer in use, bypassing speakup. This could be
+               used to make the synthesizer speak a string, or to send control
+               sequences to the synthesizer to change how the synthesizer
+               behaves.
+
+What:          /sys/accessibility/speakup/version
+KernelVersion: 2.6
+Contact:       speakup@linux-speakup.org
+Description:   Reading version returns the version of speakup, and the version
+               of the synthesizer driver currently in use.
+
+What:          /sys/accessibility/speakup/i18n/announcements
+KernelVersion: 2.6
+Contact:       speakup@linux-speakup.org
+Description:   This file contains various general announcements, most of which
+               cannot be categorized.  You will find messages such as "You
+               killed Speakup", "I'm alive", "leaving help", "parked",
+               "unparked", and others. You will also find the names of the
+               screen edges and cursor tracking modes here.
+
+What:          /sys/accessibility/speakup/i18n/chartab
+KernelVersion: 2.6
+Contact:       speakup@linux-speakup.org
+Description:   TODO
+
+What:          /sys/accessibility/speakup/i18n/ctl_keys
+KernelVersion: 2.6
+Contact:       speakup@linux-speakup.org
+Description:   Here, you will find names of control keys.  These are used with
+               Speakup's say_control feature.
+
+What:          /sys/accessibility/speakup/i18n/function_names
+KernelVersion: 2.6
+Contact:       speakup@linux-speakup.org
+Description:   Here, you will find a list of names for Speakup functions.
+               These are used by the help system.  For example, suppose that
+               you have activated help mode, and you pressed
+               keypad 3.  Speakup
+               says: "keypad 3 is character, say next."
+               The message "character, say next" names a Speakup function, and
+               it comes from this function_names file.
+
+What:          /sys/accessibility/speakup/i18n/states
+KernelVersion: 2.6
+Contact:       speakup@linux-speakup.org
+Description:   This file contains names for key states.
+               Again, these are part of the help system.  For instance, if you
+               had pressed speakup + keypad 3, you would hear:
+               "speakup keypad 3 is go to bottom edge."
+               The speakup key is depressed, so the name of the key state is
+               speakup.
+               This part of the message comes from the states collection.
+
+What:          /sys/accessibility/speakup/i18n/characters
+KernelVersion: 2.6
+Contact:       speakup@linux-speakup.org
+Description:   Through this sys entry, Speakup gives you the ability to change
+               how Speakup pronounces a given character. You could, for
+               example, change how some punctuation characters are spoken. You
+               can even change how Speakup will pronounce certain letters. For
+               further details see '12.  Changing the Pronunciation of
+               Characters' in Speakup User's Guide (file spkguide.txt in
+               source).
+
+What:          /sys/accessibility/speakup/i18n/colors
+KernelVersion: 2.6
+Contact:       speakup@linux-speakup.org
+Description:   When you use the "say attributes" function, Speakup says the
+               name of the foreground and background colors.  These names come
+               from the i18n/colors file.
+
+What:          /sys/accessibility/speakup/i18n/formatted
+KernelVersion: 2.6
+Contact:       speakup@linux-speakup.org
+Description:   This group of messages contains embedded formatting codes, to
+               specify the type and width of displayed data.  If you change
+               these, you must preserve all of the formatting codes, and they
+               must appear in the order used by the default messages.
+
+What:          /sys/accessibility/speakup/i18n/key_names
+KernelVersion: 2.6
+Contact:       speakup@linux-speakup.org
+Description:   Again, key_names is used by Speakup's help system.  In the
+               previous example, Speakup said that you pressed "keypad 3."
+               This name came from the key_names file.
+
+What:          /sys/accessibility/speakup/<synth-name>/
+KernelVersion: 2.6
+Contact:       speakup@linux-speakup.org
+Description:   In `/sys/accessibility/speakup` is a directory corresponding to
+               the synthesizer driver currently in use, e.g. `soft` for the
+               soft driver. This directory contains files which control the
+               speech synthesizer itself, as opposed to controlling the speakup
+               screen reader. The parameters in this directory have the same
+               names and functions across all supported synthesizers. The range
+               of values for freq, pitch, rate, and vol is the same for all
+               supported synthesizers, with the given range being internally
+               mapped by the driver to more or less fit the range of values
+               supported for a given parameter by the individual synthesizer.
+               Below is a description of the values and parameters for the
+               soft synthesizer, which is currently the most commonly used.
+
+What:          /sys/accessibility/speakup/soft/caps_start
+KernelVersion: 2.6
+Contact:       speakup@linux-speakup.org
+Description:   This is the string that is sent to the synthesizer to cause it
+               to start speaking uppercase letters. For the soft synthesizer
+               and most others, this causes the pitch of the voice to rise
+               above the currently set pitch.
+
+What:          /sys/accessibility/speakup/soft/caps_stop
+KernelVersion: 2.6
+Contact:       speakup@linux-speakup.org
+Description:   This is the string sent to the synthesizer to cause it to stop
+               speaking uppercase letters. In the case of the soft synthesizer
+               and most others, this returns the pitch of the voice to the
+               currently set pitch.
+
+What:          /sys/accessibility/speakup/soft/delay_time
+KernelVersion: 2.6
+Contact:       speakup@linux-speakup.org
+Description:   TODO:
+
+What:          /sys/accessibility/speakup/soft/direct
+KernelVersion: 2.6
+Contact:       speakup@linux-speakup.org
+Description:   Controls if punctuation is spoken by speakup, or by the
+               synthesizer.
+               For example, speakup speaks ">" as "greater", while
+               the espeak synthesizer used by the soft driver speaks "greater
+               than". Zero lets speakup speak the punctuation. One lets the
+               synthesizer itself speak punctuation.
+
+What:          /sys/accessibility/speakup/soft/freq
+KernelVersion: 2.6
+Contact:       speakup@linux-speakup.org
+Description:   Gets or sets the frequency of the speech synthesizer. Range is
+               0-9.
+
+What:          /sys/accessibility/speakup/soft/full_time
+KernelVersion: 2.6
+Contact:       speakup@linux-speakup.org
+Description:   TODO:
+
+What:          /sys/accessibility/speakup/soft/jiffy_delta
+KernelVersion: 2.6
+Contact:       speakup@linux-speakup.org
+Description:   This controls how many jiffies the kernel gives to the
+               synthesizer. Setting this too high can make a system unstable,
+               or even crash it.
+
+What:          /sys/accessibility/speakup/soft/pitch
+KernelVersion: 2.6
+Contact:       speakup@linux-speakup.org
+Description:   Gets or sets the pitch of the synthesizer. The range is 0-9.
+
+What:          /sys/accessibility/speakup/soft/punct
+KernelVersion: 2.6
+Contact:       speakup@linux-speakup.org
+Description:   Gets or sets the amount of punctuation spoken by the
+               synthesizer. The range for the soft driver seems to be 0-2.
+               TODO: How is this related to speakup's punc_level, or
+               reading_punc.
+
+What:          /sys/accessibility/speakup/soft/rate
+KernelVersion: 2.6
+Contact:       speakup@linux-speakup.org
+Description:   Gets or sets the rate of the synthesizer. Range is from zero
+               slowest, to nine fastest.
+
+What:          /sys/accessibility/speakup/soft/tone
+KernelVersion: 2.6
+Contact:       speakup@linux-speakup.org
+Description:   Gets or sets the tone of the speech synthesizer. The range for
+               the soft driver seems to be 0-2. This seems to make no
+               difference if using espeak and the espeakup connector.
+               TODO: does espeakup support different tonalities?
+
+What:          /sys/accessibility/speakup/soft/trigger_time
+KernelVersion: 2.6
+Contact:       speakup@linux-speakup.org
+Description:   TODO:
+
+What:          /sys/accessibility/speakup/soft/voice
+KernelVersion: 2.6
+Contact:       speakup@linux-speakup.org
+Description:   Gets or sets the voice used by the synthesizer if the
+               synthesizer can speak in more than one voice. The range for the
+               soft driver is 0-7. Note that while espeak supports multiple
+               voices, this parameter will not set the voice when the espeakup
+               connector is used between speakup and espeak.
+
+What:          /sys/accessibility/speakup/soft/vol
+KernelVersion: 2.6
+Contact:       speakup@linux-speakup.org
+Description:   Gets or sets the volume of the speech synthesizer. Range is 0-9,
+               with zero being the softest, and nine being the loudest.
+
index bc1eaa3a07731797e10186b6ec60b73e9c608310..826016c3431a102d8e4a57ab7dbb4ad9c250d6dd 100644 (file)
@@ -12,7 +12,7 @@
 static const struct snd_pcm_hardware snd_bcm2835_playback_hw = {
        .info = (SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER |
                 SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID |
-                SNDRV_PCM_INFO_DRAIN_TRIGGER | SNDRV_PCM_INFO_SYNC_APPLPTR),
+                SNDRV_PCM_INFO_SYNC_APPLPTR),
        .formats = SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE,
        .rates = SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_8000_48000,
        .rate_min = 8000,
@@ -29,7 +29,7 @@ static const struct snd_pcm_hardware snd_bcm2835_playback_hw = {
 static const struct snd_pcm_hardware snd_bcm2835_playback_spdif_hw = {
        .info = (SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER |
                 SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID |
-                SNDRV_PCM_INFO_DRAIN_TRIGGER | SNDRV_PCM_INFO_SYNC_APPLPTR),
+                SNDRV_PCM_INFO_SYNC_APPLPTR),
        .formats = SNDRV_PCM_FMTBIT_S16_LE,
        .rates = SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_44100 |
        SNDRV_PCM_RATE_48000,
index 23fba01107b9fff39253b3f7877a85ceb37a2d2a..c6f9cf1913d20e90cb5e8d2f0e9d8ee5ce68765c 100644 (file)
@@ -289,6 +289,7 @@ int bcm2835_audio_stop(struct bcm2835_alsa_stream *alsa_stream)
                                         VC_AUDIO_MSG_TYPE_STOP, false);
 }
 
+/* FIXME: this doesn't seem to work as expected for "draining" */
 int bcm2835_audio_drain(struct bcm2835_alsa_stream *alsa_stream)
 {
        struct vc_audio_msg m = {
index c6bb4aaf9bd02fc18b6ca9e1bbc7a37e5724c805..082302944c374d7f4053313fa46d7ff97cd298ec 100644 (file)
@@ -1748,8 +1748,10 @@ vt6655_probe(struct pci_dev *pcid, const struct pci_device_id *ent)
 
        priv->hw->max_signal = 100;
 
-       if (vnt_init(priv))
+       if (vnt_init(priv)) {
+               device_free_info(priv);
                return -ENODEV;
+       }
 
        device_print_info(priv);
        pci_set_drvdata(pcid, priv);
index eee1998c4b189179b6de81f2fa98717bb5395176..fac38c842ac50bce8f53441369f61fd8cd20a0bd 100644 (file)
@@ -469,10 +469,8 @@ static int prism2_connect(struct wiphy *wiphy, struct net_device *dev,
        /* Set the encryption - we only support wep */
        if (is_wep) {
                if (sme->key) {
-                       if (sme->key_idx >= NUM_WEPKEYS) {
-                               err = -EINVAL;
-                               goto exit;
-                       }
+                       if (sme->key_idx >= NUM_WEPKEYS)
+                               return -EINVAL;
 
                        result = prism2_domibset_uint32(wlandev,
                                DIDMIB_DOT11SMT_PRIVACYTABLE_WEPDEFAULTKEYID,
index c70caf4ea49012006c3a8ac754ff9e1c16399ec7..a2b5c796bbc4a5dfee4513bd93a6449e57d4dbc4 100644 (file)
@@ -1831,7 +1831,7 @@ static void cxgbit_fw4_ack(struct cxgbit_sock *csk, struct sk_buff *skb)
 
        while (credits) {
                struct sk_buff *p = cxgbit_sock_peek_wr(csk);
-               const u32 csum = (__force u32)p->csum;
+               u32 csum;
 
                if (unlikely(!p)) {
                        pr_err("csk 0x%p,%u, cr %u,%u+%u, empty.\n",
@@ -1840,6 +1840,7 @@ static void cxgbit_fw4_ack(struct cxgbit_sock *csk, struct sk_buff *skb)
                        break;
                }
 
+               csum = (__force u32)p->csum;
                if (unlikely(credits < csum)) {
                        pr_warn("csk 0x%p,%u, cr %u,%u+%u, < %u.\n",
                                csk,  csk->tid,
index 04bf2acd3800ded1e776c087f7e2145bf219553b..2d19f0e332b0157695c6e7e87fdc7a25ab3b59cd 100644 (file)
@@ -1074,27 +1074,6 @@ passthrough_parse_cdb(struct se_cmd *cmd,
        struct se_device *dev = cmd->se_dev;
        unsigned int size;
 
-       /*
-        * Clear a lun set in the cdb if the initiator talking to use spoke
-        * and old standards version, as we can't assume the underlying device
-        * won't choke up on it.
-        */
-       switch (cdb[0]) {
-       case READ_10: /* SBC - RDProtect */
-       case READ_12: /* SBC - RDProtect */
-       case READ_16: /* SBC - RDProtect */
-       case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */
-       case VERIFY: /* SBC - VRProtect */
-       case VERIFY_16: /* SBC - VRProtect */
-       case WRITE_VERIFY: /* SBC - VRProtect */
-       case WRITE_VERIFY_12: /* SBC - VRProtect */
-       case MAINTENANCE_IN: /* SPC - Parameter Data Format for SA RTPG */
-               break;
-       default:
-               cdb[1] &= 0x1f; /* clear logical unit number */
-               break;
-       }
-
        /*
         * For REPORT LUNS we always need to emulate the response, for everything
         * else, pass it up.
index 391f39776c6ab8c6205ef0a122079887e53220ea..6b9865c786ba3b146143624295592a19629cae3f 100644 (file)
@@ -88,7 +88,7 @@ struct cpufreq_cooling_device {
        struct cpufreq_policy *policy;
        struct list_head node;
        struct time_in_idle *idle_time;
-       struct dev_pm_qos_request qos_req;
+       struct freq_qos_request qos_req;
 };
 
 static DEFINE_IDA(cpufreq_ida);
@@ -331,7 +331,7 @@ static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev,
 
        cpufreq_cdev->cpufreq_state = state;
 
-       return dev_pm_qos_update_request(&cpufreq_cdev->qos_req,
+       return freq_qos_update_request(&cpufreq_cdev->qos_req,
                                cpufreq_cdev->freq_table[state].frequency);
 }
 
@@ -615,9 +615,9 @@ __cpufreq_cooling_register(struct device_node *np,
                cooling_ops = &cpufreq_cooling_ops;
        }
 
-       ret = dev_pm_qos_add_request(dev, &cpufreq_cdev->qos_req,
-                                    DEV_PM_QOS_MAX_FREQUENCY,
-                                    cpufreq_cdev->freq_table[0].frequency);
+       ret = freq_qos_add_request(&policy->constraints,
+                                  &cpufreq_cdev->qos_req, FREQ_QOS_MAX,
+                                  cpufreq_cdev->freq_table[0].frequency);
        if (ret < 0) {
                pr_err("%s: Failed to add freq constraint (%d)\n", __func__,
                       ret);
@@ -637,7 +637,7 @@ __cpufreq_cooling_register(struct device_node *np,
        return cdev;
 
 remove_qos_req:
-       dev_pm_qos_remove_request(&cpufreq_cdev->qos_req);
+       freq_qos_remove_request(&cpufreq_cdev->qos_req);
 remove_ida:
        ida_simple_remove(&cpufreq_ida, cpufreq_cdev->id);
 free_table:
@@ -736,7 +736,7 @@ void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
        mutex_unlock(&cooling_list_lock);
 
        thermal_cooling_device_unregister(cdev);
-       dev_pm_qos_remove_request(&cpufreq_cdev->qos_req);
+       freq_qos_remove_request(&cpufreq_cdev->qos_req);
        ida_simple_remove(&cpufreq_ida, cpufreq_cdev->id);
        kfree(cpufreq_cdev->idle_time);
        kfree(cpufreq_cdev->freq_table);
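The cpufreq cooling hunks above move from the per-device dev_pm_qos_* calls to the per-policy freq_qos_* API: a request is added against policy->constraints with a FREQ_QOS_MAX (or FREQ_QOS_MIN) type, updated as the cooling state changes, and removed on teardown. A hedged in-kernel sketch of that lifecycle, with hypothetical names (my_cap, my_cap_init, my_cap_set are not from this driver):

    #include <linux/cpufreq.h>
    #include <linux/pm_qos.h>

    static struct freq_qos_request my_cap;

    /* Add a maximum-frequency cap to a policy; undo with freq_qos_remove_request(&my_cap). */
    static int my_cap_init(struct cpufreq_policy *policy, s32 max_khz)
    {
        return freq_qos_add_request(&policy->constraints, &my_cap,
                                    FREQ_QOS_MAX, max_khz);
    }

    /* Tighten or relax the cap as the cooling state changes. */
    static int my_cap_set(s32 max_khz)
    {
        return freq_qos_update_request(&my_cap, max_khz);
    }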
index e55c79eb64309afa131d52c3a4ad0c2a44ca7902..98361acd3053fd30173e09061f068f35661a1280 100644 (file)
@@ -968,6 +968,11 @@ static int __init n_hdlc_init(void)
        
 }      /* end of init_module() */
 
+#ifdef CONFIG_SPARC
+#undef __exitdata
+#define __exitdata
+#endif
+
 static const char hdlc_unregister_ok[] __exitdata =
        KERN_INFO "N_HDLC: line discipline unregistered\n";
 static const char hdlc_unregister_fail[] __exitdata =
index 02c5aff58a7408d3509f020b18f5a5cd93eac6c6..8df89e9cd2542d05dff25fc377b08d30a9efe5c5 100644 (file)
@@ -72,8 +72,8 @@ static int serial_8250_men_mcb_probe(struct mcb_device *mdev,
 {
        struct serial_8250_men_mcb_data *data;
        struct resource *mem;
-       unsigned int num_ports;
-       unsigned int i;
+       int num_ports;
+       int i;
        void __iomem *membase;
 
        mem = mcb_get_resource(mdev, IORESOURCE_MEM);
@@ -88,7 +88,7 @@ static int serial_8250_men_mcb_probe(struct mcb_device *mdev,
        dev_dbg(&mdev->dev, "found a 16z%03u with %u ports\n",
                mdev->id, num_ports);
 
-       if (num_ports == 0 || num_ports > 4) {
+       if (num_ports <= 0 || num_ports > 4) {
                dev_err(&mdev->dev, "unexpected number of ports: %u\n",
                        num_ports);
                return -ENODEV;
@@ -133,7 +133,7 @@ static int serial_8250_men_mcb_probe(struct mcb_device *mdev,
 
 static void serial_8250_men_mcb_remove(struct mcb_device *mdev)
 {
-       unsigned int num_ports, i;
+       int num_ports, i;
        struct serial_8250_men_mcb_data *data = mcb_get_drvdata(mdev);
 
        if (!data)
index c68e2b3a1634372eead0c67dc3d0f805ed308e02..836e736ae188b82f01af5555e7c622854018b170 100644 (file)
@@ -141,7 +141,7 @@ static void omap8250_set_mctrl(struct uart_port *port, unsigned int mctrl)
 
        serial8250_do_set_mctrl(port, mctrl);
 
-       if (!up->gpios) {
+       if (!mctrl_gpio_to_gpiod(up->gpios, UART_GPIO_RTS)) {
                /*
                 * Turn off autoRTS if RTS is lowered and restore autoRTS
                 * setting if RTS is raised
@@ -456,7 +456,8 @@ static void omap_8250_set_termios(struct uart_port *port,
        up->port.status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS | UPSTAT_AUTOXOFF);
 
        if (termios->c_cflag & CRTSCTS && up->port.flags & UPF_HARD_FLOW &&
-           !up->gpios) {
+           !mctrl_gpio_to_gpiod(up->gpios, UART_GPIO_RTS) &&
+           !mctrl_gpio_to_gpiod(up->gpios, UART_GPIO_CTS)) {
                /* Enable AUTOCTS (autoRTS is enabled when RTS is raised) */
                up->port.status |= UPSTAT_AUTOCTS | UPSTAT_AUTORTS;
                priv->efr |= UART_EFR_CTS;
index 4789b5d62f6379cf7709295089ece2a2ac79cc86..67a9eb3f94cecccef0cff08c1fe47bf1638ba689 100644 (file)
@@ -1032,6 +1032,7 @@ config SERIAL_SIFIVE_CONSOLE
        bool "Console on SiFive UART"
        depends on SERIAL_SIFIVE=y
        select SERIAL_CORE_CONSOLE
+       select SERIAL_EARLYCON
        help
          Select this option if you would like to use a SiFive UART as the
          system console.
index 68d74f2b5106ac5455993bd2a8201219e515b8fc..a32f0d2afd596ba4d1f6ebe08f799c5e31dec80c 100644 (file)
@@ -3,7 +3,7 @@
  * Freescale linflexuart serial port driver
  *
  * Copyright 2012-2016 Freescale Semiconductor, Inc.
- * Copyright 2017-2018 NXP
+ * Copyright 2017-2019 NXP
  */
 
 #if defined(CONFIG_SERIAL_FSL_LINFLEXUART_CONSOLE) && \
@@ -246,12 +246,14 @@ static irqreturn_t linflex_rxint(int irq, void *dev_id)
        struct tty_port *port = &sport->state->port;
        unsigned long flags, status;
        unsigned char rx;
+       bool brk;
 
        spin_lock_irqsave(&sport->lock, flags);
 
        status = readl(sport->membase + UARTSR);
        while (status & LINFLEXD_UARTSR_RMB) {
                rx = readb(sport->membase + BDRM);
+               brk = false;
                flg = TTY_NORMAL;
                sport->icount.rx++;
 
@@ -261,8 +263,11 @@ static irqreturn_t linflex_rxint(int irq, void *dev_id)
                                status |= LINFLEXD_UARTSR_SZF;
                        if (status & LINFLEXD_UARTSR_BOF)
                                status |= LINFLEXD_UARTSR_BOF;
-                       if (status & LINFLEXD_UARTSR_FEF)
+                       if (status & LINFLEXD_UARTSR_FEF) {
+                               if (!rx)
+                                       brk = true;
                                status |= LINFLEXD_UARTSR_FEF;
+                       }
                        if (status & LINFLEXD_UARTSR_PE)
                                status |=  LINFLEXD_UARTSR_PE;
                }
@@ -271,13 +276,15 @@ static irqreturn_t linflex_rxint(int irq, void *dev_id)
                       sport->membase + UARTSR);
                status = readl(sport->membase + UARTSR);
 
-               if (uart_handle_sysrq_char(sport, (unsigned char)rx))
-                       continue;
-
+               if (brk) {
+                       uart_handle_break(sport);
+               } else {
 #ifdef SUPPORT_SYSRQ
-                       sport->sysrq = 0;
+                       if (uart_handle_sysrq_char(sport, (unsigned char)rx))
+                               continue;
 #endif
-               tty_insert_flip_char(port, rx, flg);
+                       tty_insert_flip_char(port, rx, flg);
+               }
        }
 
        spin_unlock_irqrestore(&sport->lock, flags);
index 3e17bb8a0b16eea373f5748dde9551175b71d5f5..537896c4d887e278e2ea48d8fcd9abe224ab4944 100644 (file)
@@ -548,7 +548,7 @@ static void lpuart_flush_buffer(struct uart_port *port)
                val |= UARTFIFO_TXFLUSH | UARTFIFO_RXFLUSH;
                lpuart32_write(&sport->port, val, UARTFIFO);
        } else {
-               val = readb(sport->port.membase + UARTPFIFO);
+               val = readb(sport->port.membase + UARTCFIFO);
                val |= UARTCFIFO_TXFLUSH | UARTCFIFO_RXFLUSH;
                writeb(val, sport->port.membase + UARTCFIFO);
        }
index 87c58f9f639045b70c38ed5b545df56dd78c408a..5e08f2657b90feb254fc03fac739761e2fe86f85 100644 (file)
@@ -2222,8 +2222,8 @@ static int imx_uart_probe(struct platform_device *pdev)
                return PTR_ERR(base);
 
        rxirq = platform_get_irq(pdev, 0);
-       txirq = platform_get_irq(pdev, 1);
-       rtsirq = platform_get_irq(pdev, 2);
+       txirq = platform_get_irq_optional(pdev, 1);
+       rtsirq = platform_get_irq_optional(pdev, 2);
 
        sport->port.dev = &pdev->dev;
        sport->port.mapbase = res->start;
index 03963af77b15b88ab6e19e38fa7af25fc2f40407..d2d8b34946851deee4c946f5efdf2abce291c8ed 100644 (file)
@@ -740,7 +740,7 @@ static int __init owl_uart_init(void)
        return ret;
 }
 
-static void __init owl_uart_exit(void)
+static void __exit owl_uart_exit(void)
 {
        platform_driver_unregister(&owl_uart_platform_driver);
        uart_unregister_driver(&owl_uart_driver);
index c1b0d7662ef9eb177a40b832204c264f95edb2b8..ff9a27d48bca85b4abfa4197f9f7198ed158cedd 100644 (file)
@@ -815,7 +815,7 @@ static int __init rda_uart_init(void)
        return ret;
 }
 
-static void __init rda_uart_exit(void)
+static void __exit rda_uart_exit(void)
 {
        platform_driver_unregister(&rda_uart_platform_driver);
        uart_unregister_driver(&rda_uart_driver);
index 6e713be1d4e9ebd94d1cd8c84f13f6f9edd892ae..c4a414a46c7fef07ccafcbbfeb0adbec1ba1f7e3 100644 (file)
@@ -1964,8 +1964,10 @@ uart_get_console(struct uart_port *ports, int nr, struct console *co)
  *        console=<name>,io|mmio|mmio16|mmio32|mmio32be|mmio32native,<addr>,<options>
  *
  *     The optional form
+ *
  *        earlycon=<name>,0x<addr>,<options>
  *        console=<name>,0x<addr>,<options>
+ *
  *     is also accepted; the returned @iotype will be UPIO_MEM.
  *
  *     Returns 0 on success or -EINVAL on failure
index d9074303c88e99b6be68b628ee905060d0fbd0b1..fb4781292d4097fb4f6c272b882159b49f486c7a 100644 (file)
@@ -66,6 +66,9 @@ EXPORT_SYMBOL_GPL(mctrl_gpio_set);
 struct gpio_desc *mctrl_gpio_to_gpiod(struct mctrl_gpios *gpios,
                                      enum mctrl_gpio_idx gidx)
 {
+       if (gpios == NULL)
+               return NULL;
+
        return gpios->gpio[gidx];
 }
 EXPORT_SYMBOL_GPL(mctrl_gpio_to_gpiod);
index 4e754a4850e6db63e1693edb04113b2e72e44a0e..22e5d4e13714e8635a54b74ab6c5f02f2c05bb2a 100644 (file)
@@ -2894,8 +2894,12 @@ static int sci_init_single(struct platform_device *dev,
        port->mapbase = res->start;
        sci_port->reg_size = resource_size(res);
 
-       for (i = 0; i < ARRAY_SIZE(sci_port->irqs); ++i)
-               sci_port->irqs[i] = platform_get_irq(dev, i);
+       for (i = 0; i < ARRAY_SIZE(sci_port->irqs); ++i) {
+               if (i)
+                       sci_port->irqs[i] = platform_get_irq_optional(dev, i);
+               else
+                       sci_port->irqs[i] = platform_get_irq(dev, i);
+       }
 
        /* The SCI generates several interrupts. They can be muxed together or
         * connected to different interrupt lines. In the muxed case only one
index b8b912b5a8b992bcb6bcf2ddd3e5703ca00dd2ae..06e79c11141d440347aa8d6c7d41b777f1c2febe 100644 (file)
@@ -897,7 +897,8 @@ static int __init ulite_init(void)
 static void __exit ulite_exit(void)
 {
        platform_driver_unregister(&ulite_platform_driver);
-       uart_unregister_driver(&ulite_uart_driver);
+       if (ulite_uart_driver.state)
+               uart_unregister_driver(&ulite_uart_driver);
 }
 
 module_init(ulite_init);
index da4563aaaf5c9db19d379d075bb0d95e785c5966..4e55bc327a54758e8f9252e5a8ef188274ae05af 100644 (file)
@@ -1550,7 +1550,6 @@ static int cdns_uart_probe(struct platform_device *pdev)
                goto err_out_id;
        }
 
-       uartps_major = cdns_uart_uart_driver->tty_driver->major;
        cdns_uart_data->cdns_uart_driver = cdns_uart_uart_driver;
 
        /*
@@ -1680,6 +1679,7 @@ static int cdns_uart_probe(struct platform_device *pdev)
                console_port = NULL;
 #endif
 
+       uartps_major = cdns_uart_uart_driver->tty_driver->major;
        cdns_uart_data->cts_override = of_property_read_bool(pdev->dev.of_node,
                                                             "cts-override");
        return 0;
@@ -1741,6 +1741,12 @@ static int cdns_uart_remove(struct platform_device *pdev)
                console_port = NULL;
 #endif
 
+       /* If this is the last instance, the major number should be reset */
+       mutex_lock(&bitmap_lock);
+       if (bitmap_empty(bitmap, MAX_UART_INSTANCES))
+               uartps_major = 0;
+       mutex_unlock(&bitmap_lock);
+
        uart_unregister_driver(cdns_uart_data->cdns_uart_driver);
        return rc;
 }
index c41ddb61b8578a32c4db947df6bd900ccefbe4b8..b0a29efe7d31706a37c2bc4bbbf66bfc0c718b32 100644 (file)
@@ -159,8 +159,9 @@ static int cdns3_pci_probe(struct pci_dev *pdev,
                wrap->plat_dev = platform_device_register_full(&plat_info);
                if (IS_ERR(wrap->plat_dev)) {
                        pci_disable_device(pdev);
+                       err = PTR_ERR(wrap->plat_dev);
                        kfree(wrap);
-                       return PTR_ERR(wrap->plat_dev);
+                       return err;
                }
        }
 
index 06f1e105be4e7f67618cec8d475d493b62757ab9..c2123ef8d8a3320f11f1fa3b7b0363dc3886cd86 100644 (file)
@@ -160,10 +160,30 @@ static int cdns3_core_init_role(struct cdns3 *cdns)
        if (ret)
                goto err;
 
-       if (cdns->dr_mode != USB_DR_MODE_OTG) {
+       /* Initialize idle role to start with */
+       ret = cdns3_role_start(cdns, USB_ROLE_NONE);
+       if (ret)
+               goto err;
+
+       switch (cdns->dr_mode) {
+       case USB_DR_MODE_OTG:
                ret = cdns3_hw_role_switch(cdns);
                if (ret)
                        goto err;
+               break;
+       case USB_DR_MODE_PERIPHERAL:
+               ret = cdns3_role_start(cdns, USB_ROLE_DEVICE);
+               if (ret)
+                       goto err;
+               break;
+       case USB_DR_MODE_HOST:
+               ret = cdns3_role_start(cdns, USB_ROLE_HOST);
+               if (ret)
+                       goto err;
+               break;
+       default:
+               ret = -EINVAL;
+               goto err;
        }
 
        return ret;
index 44f652e8b5a2362c3f8b44bdf04e95413c92e0df..e71240b386b4965fc332c5def2e7b4ffa26f9a1b 100644 (file)
@@ -234,9 +234,11 @@ static int cdns3_req_ep0_set_address(struct cdns3_device *priv_dev,
 static int cdns3_req_ep0_get_status(struct cdns3_device *priv_dev,
                                    struct usb_ctrlrequest *ctrl)
 {
+       struct cdns3_endpoint *priv_ep;
        __le16 *response_pkt;
        u16 usb_status = 0;
        u32 recip;
+       u8 index;
 
        recip = ctrl->bRequestType & USB_RECIP_MASK;
 
@@ -262,9 +264,13 @@ static int cdns3_req_ep0_get_status(struct cdns3_device *priv_dev,
        case USB_RECIP_INTERFACE:
                return cdns3_ep0_delegate_req(priv_dev, ctrl);
        case USB_RECIP_ENDPOINT:
-               /* check if endpoint is stalled */
+               index = cdns3_ep_addr_to_index(ctrl->wIndex);
+               priv_ep = priv_dev->eps[index];
+
+               /* check if endpoint is stalled or stall is pending */
                cdns3_select_ep(priv_dev, ctrl->wIndex);
-               if (EP_STS_STALL(readl(&priv_dev->regs->ep_sts)))
+               if (EP_STS_STALL(readl(&priv_dev->regs->ep_sts)) ||
+                   (priv_ep->flags & EP_STALL_PENDING))
                        usb_status =  BIT(USB_ENDPOINT_HALT);
                break;
        default:
@@ -332,7 +338,7 @@ static int cdns3_ep0_feature_handle_device(struct cdns3_device *priv_dev,
                         * for sending status stage.
                         * This time should be less than 3ms.
                         */
-                       usleep_range(1000, 2000);
+                       mdelay(1);
                        cdns3_set_register_bit(&priv_dev->regs->usb_cmd,
                                               USB_CMD_STMODE |
                                               USB_STS_TMODE_SEL(tmode - 1));
index 228cdc4ab88627cc01204f56381cfd93e9f99f27..4c1e755093039d0582ca30922d620009441aed0c 100644 (file)
@@ -1145,6 +1145,14 @@ static void cdns3_transfer_completed(struct cdns3_device *priv_dev,
                request = cdns3_next_request(&priv_ep->pending_req_list);
                priv_req = to_cdns3_request(request);
 
+               trb = priv_ep->trb_pool + priv_ep->dequeue;
+
+               /* Request was dequeued and TRB was changed to TRB_LINK. */
+               if (TRB_FIELD_TO_TYPE(trb->control) == TRB_LINK) {
+                       trace_cdns3_complete_trb(priv_ep, trb);
+                       cdns3_move_deq_to_next_trb(priv_req);
+               }
+
                /* Re-select endpoint. It could be changed by other CPU during
                 * handling usb_gadget_giveback_request.
                 */
@@ -2067,6 +2075,7 @@ int cdns3_gadget_ep_dequeue(struct usb_ep *ep,
        struct usb_request *req, *req_temp;
        struct cdns3_request *priv_req;
        struct cdns3_trb *link_trb;
+       u8 req_on_hw_ring = 0;
        unsigned long flags;
        int ret = 0;
 
@@ -2083,8 +2092,10 @@ int cdns3_gadget_ep_dequeue(struct usb_ep *ep,
 
        list_for_each_entry_safe(req, req_temp, &priv_ep->pending_req_list,
                                 list) {
-               if (request == req)
+               if (request == req) {
+                       req_on_hw_ring = 1;
                        goto found;
+               }
        }
 
        list_for_each_entry_safe(req, req_temp, &priv_ep->deferred_req_list,
@@ -2096,27 +2107,21 @@ int cdns3_gadget_ep_dequeue(struct usb_ep *ep,
        goto not_found;
 
 found:
-
-       if (priv_ep->wa1_trb == priv_req->trb)
-               cdns3_wa1_restore_cycle_bit(priv_ep);
-
        link_trb = priv_req->trb;
-       cdns3_move_deq_to_next_trb(priv_req);
-       cdns3_gadget_giveback(priv_ep, priv_req, -ECONNRESET);
-
-       /* Update ring */
-       request = cdns3_next_request(&priv_ep->deferred_req_list);
-       if (request) {
-               priv_req = to_cdns3_request(request);
 
+       /* Update ring only if removed request is on pending_req_list list */
+       if (req_on_hw_ring) {
                link_trb->buffer = TRB_BUFFER(priv_ep->trb_pool_dma +
                                              (priv_req->start_trb * TRB_SIZE));
                link_trb->control = (link_trb->control & TRB_CYCLE) |
-                                   TRB_TYPE(TRB_LINK) | TRB_CHAIN | TRB_TOGGLE;
-       } else {
-               priv_ep->flags |= EP_UPDATE_EP_TRBADDR;
+                                   TRB_TYPE(TRB_LINK) | TRB_CHAIN;
+
+               if (priv_ep->wa1_trb == priv_req->trb)
+                       cdns3_wa1_restore_cycle_bit(priv_ep);
        }
 
+       cdns3_gadget_giveback(priv_ep, priv_req, -ECONNRESET);
+
 not_found:
        spin_unlock_irqrestore(&priv_dev->lock, flags);
        return ret;
@@ -2324,8 +2329,6 @@ static void cdns3_gadget_config(struct cdns3_device *priv_dev)
        writel(USB_CONF_CLK2OFFDS | USB_CONF_L1DS, &regs->usb_conf);
 
        cdns3_configure_dmult(priv_dev, NULL);
-
-       cdns3_gadget_pullup(&priv_dev->gadget, 1);
 }
 
 /**
@@ -2340,9 +2343,35 @@ static int cdns3_gadget_udc_start(struct usb_gadget *gadget,
 {
        struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget);
        unsigned long flags;
+       enum usb_device_speed max_speed = driver->max_speed;
 
        spin_lock_irqsave(&priv_dev->lock, flags);
        priv_dev->gadget_driver = driver;
+
+       /* limit speed if necessary */
+       max_speed = min(driver->max_speed, gadget->max_speed);
+
+       switch (max_speed) {
+       case USB_SPEED_FULL:
+               writel(USB_CONF_SFORCE_FS, &priv_dev->regs->usb_conf);
+               writel(USB_CONF_USB3DIS, &priv_dev->regs->usb_conf);
+               break;
+       case USB_SPEED_HIGH:
+               writel(USB_CONF_USB3DIS, &priv_dev->regs->usb_conf);
+               break;
+       case USB_SPEED_SUPER:
+               break;
+       default:
+               dev_err(priv_dev->dev,
+                       "invalid maximum_speed parameter %d\n",
+                       max_speed);
+               /* fall through */
+       case USB_SPEED_UNKNOWN:
+               /* default to superspeed */
+               max_speed = USB_SPEED_SUPER;
+               break;
+       }
+
        cdns3_gadget_config(priv_dev);
        spin_unlock_irqrestore(&priv_dev->lock, flags);
        return 0;
@@ -2376,6 +2405,8 @@ static int cdns3_gadget_udc_stop(struct usb_gadget *gadget)
                writel(EP_CMD_EPRST, &priv_dev->regs->ep_cmd);
                readl_poll_timeout_atomic(&priv_dev->regs->ep_cmd, val,
                                          !(val & EP_CMD_EPRST), 1, 100);
+
+               priv_ep->flags &= ~EP_CLAIMED;
        }
 
        /* disable interrupt for device */
@@ -2570,11 +2601,7 @@ static int cdns3_gadget_start(struct cdns3 *cdns)
        /* Check the maximum_speed parameter */
        switch (max_speed) {
        case USB_SPEED_FULL:
-               writel(USB_CONF_SFORCE_FS, &priv_dev->regs->usb_conf);
-               break;
        case USB_SPEED_HIGH:
-               writel(USB_CONF_USB3DIS, &priv_dev->regs->usb_conf);
-               break;
        case USB_SPEED_SUPER:
                break;
        default:
@@ -2662,6 +2689,13 @@ static int __cdns3_gadget_init(struct cdns3 *cdns)
 {
        int ret = 0;
 
+       /* Ensure 32-bit DMA Mask in case we switched back from Host mode */
+       ret = dma_set_mask_and_coherent(cdns->dev, DMA_BIT_MASK(32));
+       if (ret) {
+               dev_err(cdns->dev, "Failed to set dma mask: %d\n", ret);
+               return ret;
+       }
+
        cdns3_drd_switch_gadget(cdns, 1);
        pm_runtime_get_sync(cdns->dev);
 
@@ -2700,8 +2734,6 @@ static int cdns3_gadget_suspend(struct cdns3 *cdns, bool do_wakeup)
        /* disable interrupt for device */
        writel(0, &priv_dev->regs->usb_ien);
 
-       cdns3_gadget_pullup(&priv_dev->gadget, 0);
-
        return 0;
 }
 
index b498a170b7e881f3e98f6ba860db27cb928f4fde..ae11810f882618c052e4d4315315380fa74e3f9e 100644 (file)
@@ -12,7 +12,6 @@
 #ifdef CONFIG_USB_CDNS3_HOST
 
 int cdns3_host_init(struct cdns3 *cdns);
-void cdns3_host_exit(struct cdns3 *cdns);
 
 #else
 
index 2733a8f71fcd6572634899324e5152538e00299d..ad788bf3fe4f6f8e9ac1865fd3d6c6ca518ba341 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/platform_device.h>
 #include "core.h"
 #include "drd.h"
+#include "host-export.h"
 
 static int __cdns3_host_init(struct cdns3 *cdns)
 {
index 7fea4999d352956bd4221ef9a827dd58113535ee..0d8e3f3804a3f87e8d8a29442f2db0a50c621ac7 100644 (file)
@@ -445,6 +445,7 @@ static void usblp_cleanup(struct usblp *usblp)
        kfree(usblp->readbuf);
        kfree(usblp->device_id_string);
        kfree(usblp->statusbuf);
+       usb_put_intf(usblp->intf);
        kfree(usblp);
 }
 
@@ -461,10 +462,12 @@ static int usblp_release(struct inode *inode, struct file *file)
 
        mutex_lock(&usblp_mutex);
        usblp->used = 0;
-       if (usblp->present) {
+       if (usblp->present)
                usblp_unlink_urbs(usblp);
-               usb_autopm_put_interface(usblp->intf);
-       } else          /* finish cleanup from disconnect */
+
+       usb_autopm_put_interface(usblp->intf);
+
+       if (!usblp->present)            /* finish cleanup from disconnect */
                usblp_cleanup(usblp);
        mutex_unlock(&usblp_mutex);
        return 0;
@@ -1111,7 +1114,7 @@ static int usblp_probe(struct usb_interface *intf,
        init_waitqueue_head(&usblp->wwait);
        init_usb_anchor(&usblp->urbs);
        usblp->ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
-       usblp->intf = intf;
+       usblp->intf = usb_get_intf(intf);
 
        /* Malloc device ID string buffer to the largest expected length,
         * since we can re-query it on an ioctl and a dynamic string
@@ -1196,6 +1199,7 @@ abort:
        kfree(usblp->readbuf);
        kfree(usblp->statusbuf);
        kfree(usblp->device_id_string);
+       usb_put_intf(usblp->intf);
        kfree(usblp);
 abort_ret:
        return retval;
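
Several of the character-device fixes in this series (usblp above, and adutux, chaoskey and iowarrior further down) follow the same lifetime rule: take a reference on the USB interface at probe time with usb_get_intf() and drop it with usb_put_intf() only in the final cleanup path, so the private data can still be torn down safely after disconnect. A minimal user-space sketch of that get/put pattern, using hypothetical names rather than the kernel API:

/*
 * Sketch only, not part of the patch: a plain reference count standing in
 * for usb_get_intf()/usb_put_intf(). The object is freed only when the
 * last holder drops its reference, regardless of whether the core's
 * disconnect or the driver's final cleanup runs first.
 */
#include <stdio.h>
#include <stdlib.h>

struct intf {
	int refcount;
};

static struct intf *intf_get(struct intf *i)
{
	i->refcount++;
	return i;
}

static void intf_put(struct intf *i)
{
	if (--i->refcount == 0) {
		printf("interface freed\n");
		free(i);
	}
}

int main(void)
{
	struct intf *i = calloc(1, sizeof(*i));
	struct intf *held;

	if (!i)
		return 1;
	i->refcount = 1;	/* the core's reference */
	held = intf_get(i);	/* probe: driver takes its own reference */
	intf_put(i);		/* disconnect: core drops its reference */
	/* the driver may still dereference 'held' here */
	intf_put(held);		/* final cleanup: the object is freed only now */
	return 0;
}
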
index 151a74a54386226c1f0fc6a134556151a5bd28dc..1ac1095bfeac8448ff7fae2179a033fb558bb6ff 100644 (file)
@@ -348,6 +348,11 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno, int inum,
 
        /* Validate the wMaxPacketSize field */
        maxp = usb_endpoint_maxp(&endpoint->desc);
+       if (maxp == 0) {
+               dev_warn(ddev, "config %d interface %d altsetting %d endpoint 0x%X has wMaxPacketSize 0, skipping\n",
+                   cfgno, inum, asnum, d->bEndpointAddress);
+               goto skip_to_next_endpoint_or_interface_descriptor;
+       }
 
        /* Find the highest legal maxpacket size for this endpoint */
        i = 0;          /* additional transactions per microframe */
index 89abc6078703f218b48d4239ac60c0c11a1d71a3..556a876c7896251811288f99968f8636ecf310b6 100644 (file)
@@ -102,6 +102,7 @@ config USB_DWC3_MESON_G12A
        depends on ARCH_MESON || COMPILE_TEST
        default USB_DWC3
        select USB_ROLE_SWITCH
+       select REGMAP_MMIO
        help
          Support USB2/3 functionality in Amlogic G12A platforms.
         Say 'Y' or 'M' if you have one such device.
index 999ce5e84d3c934758855e5272d2c11f4d62a367..97d6ae3c4df2fc572f4fbae2f0d6aa9c513d1d88 100644 (file)
@@ -312,8 +312,7 @@ static void dwc3_frame_length_adjustment(struct dwc3 *dwc)
 
        reg = dwc3_readl(dwc->regs, DWC3_GFLADJ);
        dft = reg & DWC3_GFLADJ_30MHZ_MASK;
-       if (!dev_WARN_ONCE(dwc->dev, dft == dwc->fladj,
-           "request value same as default, ignoring\n")) {
+       if (dft != dwc->fladj) {
                reg &= ~DWC3_GFLADJ_30MHZ_MASK;
                reg |= DWC3_GFLADJ_30MHZ_SDBND_SEL | dwc->fladj;
                dwc3_writel(dwc->regs, DWC3_GFLADJ, reg);
index 726100d1ac0d8070e2b0dc2341c80886d1d3f814..c946d64142ad77979908b9f54847790c13baa341 100644 (file)
@@ -139,14 +139,14 @@ static int dwc3_otg_get_irq(struct dwc3 *dwc)
        struct platform_device *dwc3_pdev = to_platform_device(dwc->dev);
        int irq;
 
-       irq = platform_get_irq_byname(dwc3_pdev, "otg");
+       irq = platform_get_irq_byname_optional(dwc3_pdev, "otg");
        if (irq > 0)
                goto out;
 
        if (irq == -EPROBE_DEFER)
                goto out;
 
-       irq = platform_get_irq_byname(dwc3_pdev, "dwc_usb3");
+       irq = platform_get_irq_byname_optional(dwc3_pdev, "dwc_usb3");
        if (irq > 0)
                goto out;
 
@@ -157,9 +157,6 @@ static int dwc3_otg_get_irq(struct dwc3 *dwc)
        if (irq > 0)
                goto out;
 
-       if (irq != -EPROBE_DEFER)
-               dev_err(dwc->dev, "missing OTG IRQ\n");
-
        if (!irq)
                irq = -EINVAL;
 
index 5e8e18222f922c00a646063b91599be7100feb41..023f0357efd77eae3182aaf596120b8a8847fa8b 100644 (file)
@@ -258,7 +258,7 @@ static int dwc3_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
 
        ret = platform_device_add_properties(dwc->dwc3, p);
        if (ret < 0)
-               return ret;
+               goto err;
 
        ret = dwc3_pci_quirks(dwc);
        if (ret)
index 8adb59f8e4f1af8553598239fe68b1944f2c22b1..a9aba716bf80be70c3a8df033f8e468f59477560 100644 (file)
@@ -707,6 +707,12 @@ static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep)
 
                dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
        }
+
+       while (!list_empty(&dep->cancelled_list)) {
+               req = next_request(&dep->cancelled_list);
+
+               dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
+       }
 }
 
 /**
@@ -3264,14 +3270,14 @@ static int dwc3_gadget_get_irq(struct dwc3 *dwc)
        struct platform_device *dwc3_pdev = to_platform_device(dwc->dev);
        int irq;
 
-       irq = platform_get_irq_byname(dwc3_pdev, "peripheral");
+       irq = platform_get_irq_byname_optional(dwc3_pdev, "peripheral");
        if (irq > 0)
                goto out;
 
        if (irq == -EPROBE_DEFER)
                goto out;
 
-       irq = platform_get_irq_byname(dwc3_pdev, "dwc_usb3");
+       irq = platform_get_irq_byname_optional(dwc3_pdev, "dwc_usb3");
        if (irq > 0)
                goto out;
 
@@ -3282,9 +3288,6 @@ static int dwc3_gadget_get_irq(struct dwc3 *dwc)
        if (irq > 0)
                goto out;
 
-       if (irq != -EPROBE_DEFER)
-               dev_err(dwc->dev, "missing peripheral IRQ\n");
-
        if (!irq)
                irq = -EINVAL;
 
index 8deea8c91e03b230d43bb03ab8a4770ebef8a242..5567ed2cddbec278a1565216c07ac0a6fff475bc 100644 (file)
@@ -16,14 +16,14 @@ static int dwc3_host_get_irq(struct dwc3 *dwc)
        struct platform_device  *dwc3_pdev = to_platform_device(dwc->dev);
        int irq;
 
-       irq = platform_get_irq_byname(dwc3_pdev, "host");
+       irq = platform_get_irq_byname_optional(dwc3_pdev, "host");
        if (irq > 0)
                goto out;
 
        if (irq == -EPROBE_DEFER)
                goto out;
 
-       irq = platform_get_irq_byname(dwc3_pdev, "dwc_usb3");
+       irq = platform_get_irq_byname_optional(dwc3_pdev, "dwc_usb3");
        if (irq > 0)
                goto out;
 
@@ -34,9 +34,6 @@ static int dwc3_host_get_irq(struct dwc3 *dwc)
        if (irq > 0)
                goto out;
 
-       if (irq != -EPROBE_DEFER)
-               dev_err(dwc->dev, "missing host IRQ\n");
-
        if (!irq)
                irq = -EINVAL;
 
index d516e8d6cd7f939bd6c7bad09ded75bd40353d80..5ec54b69c29c58b1432b3c42b9d9bc58238db1d7 100644 (file)
@@ -2170,14 +2170,18 @@ void composite_dev_cleanup(struct usb_composite_dev *cdev)
                        usb_ep_dequeue(cdev->gadget->ep0, cdev->os_desc_req);
 
                kfree(cdev->os_desc_req->buf);
+               cdev->os_desc_req->buf = NULL;
                usb_ep_free_request(cdev->gadget->ep0, cdev->os_desc_req);
+               cdev->os_desc_req = NULL;
        }
        if (cdev->req) {
                if (cdev->setup_pending)
                        usb_ep_dequeue(cdev->gadget->ep0, cdev->req);
 
                kfree(cdev->req->buf);
+               cdev->req->buf = NULL;
                usb_ep_free_request(cdev->gadget->ep0, cdev->req);
+               cdev->req = NULL;
        }
        cdev->next_string_id = 0;
        device_remove_file(&cdev->gadget->dev, &dev_attr_suspended);
index 02512994289463b43107c49871995816272db5f8..33852c2b29d1a5cdf7d04ed35b66873e138e2e1b 100644 (file)
@@ -61,6 +61,8 @@ struct gadget_info {
        bool use_os_desc;
        char b_vendor_code;
        char qw_sign[OS_STRING_QW_SIGN_LEN];
+       spinlock_t spinlock;
+       bool unbind;
 };
 
 static inline struct gadget_info *to_gadget_info(struct config_item *item)
@@ -1244,6 +1246,7 @@ static int configfs_composite_bind(struct usb_gadget *gadget,
        int                             ret;
 
        /* the gi->lock is held by the caller */
+       gi->unbind = 0;
        cdev->gadget = gadget;
        set_gadget_data(gadget, cdev);
        ret = composite_dev_prepare(composite, cdev);
@@ -1376,31 +1379,128 @@ static void configfs_composite_unbind(struct usb_gadget *gadget)
 {
        struct usb_composite_dev        *cdev;
        struct gadget_info              *gi;
+       unsigned long flags;
 
        /* the gi->lock is held by the caller */
 
        cdev = get_gadget_data(gadget);
        gi = container_of(cdev, struct gadget_info, cdev);
+       spin_lock_irqsave(&gi->spinlock, flags);
+       gi->unbind = 1;
+       spin_unlock_irqrestore(&gi->spinlock, flags);
 
        kfree(otg_desc[0]);
        otg_desc[0] = NULL;
        purge_configs_funcs(gi);
        composite_dev_cleanup(cdev);
        usb_ep_autoconfig_reset(cdev->gadget);
+       spin_lock_irqsave(&gi->spinlock, flags);
        cdev->gadget = NULL;
        set_gadget_data(gadget, NULL);
+       spin_unlock_irqrestore(&gi->spinlock, flags);
+}
+
+static int configfs_composite_setup(struct usb_gadget *gadget,
+               const struct usb_ctrlrequest *ctrl)
+{
+       struct usb_composite_dev *cdev;
+       struct gadget_info *gi;
+       unsigned long flags;
+       int ret;
+
+       cdev = get_gadget_data(gadget);
+       if (!cdev)
+               return 0;
+
+       gi = container_of(cdev, struct gadget_info, cdev);
+       spin_lock_irqsave(&gi->spinlock, flags);
+       cdev = get_gadget_data(gadget);
+       if (!cdev || gi->unbind) {
+               spin_unlock_irqrestore(&gi->spinlock, flags);
+               return 0;
+       }
+
+       ret = composite_setup(gadget, ctrl);
+       spin_unlock_irqrestore(&gi->spinlock, flags);
+       return ret;
+}
+
+static void configfs_composite_disconnect(struct usb_gadget *gadget)
+{
+       struct usb_composite_dev *cdev;
+       struct gadget_info *gi;
+       unsigned long flags;
+
+       cdev = get_gadget_data(gadget);
+       if (!cdev)
+               return;
+
+       gi = container_of(cdev, struct gadget_info, cdev);
+       spin_lock_irqsave(&gi->spinlock, flags);
+       cdev = get_gadget_data(gadget);
+       if (!cdev || gi->unbind) {
+               spin_unlock_irqrestore(&gi->spinlock, flags);
+               return;
+       }
+
+       composite_disconnect(gadget);
+       spin_unlock_irqrestore(&gi->spinlock, flags);
+}
+
+static void configfs_composite_suspend(struct usb_gadget *gadget)
+{
+       struct usb_composite_dev *cdev;
+       struct gadget_info *gi;
+       unsigned long flags;
+
+       cdev = get_gadget_data(gadget);
+       if (!cdev)
+               return;
+
+       gi = container_of(cdev, struct gadget_info, cdev);
+       spin_lock_irqsave(&gi->spinlock, flags);
+       cdev = get_gadget_data(gadget);
+       if (!cdev || gi->unbind) {
+               spin_unlock_irqrestore(&gi->spinlock, flags);
+               return;
+       }
+
+       composite_suspend(gadget);
+       spin_unlock_irqrestore(&gi->spinlock, flags);
+}
+
+static void configfs_composite_resume(struct usb_gadget *gadget)
+{
+       struct usb_composite_dev *cdev;
+       struct gadget_info *gi;
+       unsigned long flags;
+
+       cdev = get_gadget_data(gadget);
+       if (!cdev)
+               return;
+
+       gi = container_of(cdev, struct gadget_info, cdev);
+       spin_lock_irqsave(&gi->spinlock, flags);
+       cdev = get_gadget_data(gadget);
+       if (!cdev || gi->unbind) {
+               spin_unlock_irqrestore(&gi->spinlock, flags);
+               return;
+       }
+
+       composite_resume(gadget);
+       spin_unlock_irqrestore(&gi->spinlock, flags);
 }
 
 static const struct usb_gadget_driver configfs_driver_template = {
        .bind           = configfs_composite_bind,
        .unbind         = configfs_composite_unbind,
 
-       .setup          = composite_setup,
-       .reset          = composite_disconnect,
-       .disconnect     = composite_disconnect,
+       .setup          = configfs_composite_setup,
+       .reset          = configfs_composite_disconnect,
+       .disconnect     = configfs_composite_disconnect,
 
-       .suspend        = composite_suspend,
-       .resume         = composite_resume,
+       .suspend        = configfs_composite_suspend,
+       .resume         = configfs_composite_resume,
 
        .max_speed      = USB_SPEED_SUPER,
        .driver = {
index d7e611645533701c81acda6edf39cb664cc6ea13..d354036ff6c86a0ba94bd779d9ae5da81871a360 100644 (file)
@@ -45,7 +45,7 @@ config USB_AT91
 
 config USB_LPC32XX
        tristate "LPC32XX USB Peripheral Controller"
-       depends on ARCH_LPC32XX
+       depends on ARCH_LPC32XX || COMPILE_TEST
        depends on I2C
        select USB_ISP1301
        help
index 86ffc83078647b2ba1dfbfc6e796489af15c9192..1d0d8952a74bfa4a786c6966fff1bbcbc66438a7 100644 (file)
@@ -449,9 +449,11 @@ static void submit_request(struct usba_ep *ep, struct usba_request *req)
                next_fifo_transaction(ep, req);
                if (req->last_transaction) {
                        usba_ep_writel(ep, CTL_DIS, USBA_TX_PK_RDY);
-                       usba_ep_writel(ep, CTL_ENB, USBA_TX_COMPLETE);
+                       if (ep_is_control(ep))
+                               usba_ep_writel(ep, CTL_ENB, USBA_TX_COMPLETE);
                } else {
-                       usba_ep_writel(ep, CTL_DIS, USBA_TX_COMPLETE);
+                       if (ep_is_control(ep))
+                               usba_ep_writel(ep, CTL_DIS, USBA_TX_COMPLETE);
                        usba_ep_writel(ep, CTL_ENB, USBA_TX_PK_RDY);
                }
        }
index 92af8dc98c3dcd1d5f0c474293916636049d8332..51fa614b40794310e3216f5b086442d969b5b3be 100644 (file)
@@ -98,6 +98,17 @@ int usb_ep_enable(struct usb_ep *ep)
        if (ep->enabled)
                goto out;
 
+       /* UDC drivers can't handle endpoints with maxpacket size 0 */
+       if (usb_endpoint_maxp(ep->desc) == 0) {
+               /*
+                * We should log an error message here, but we can't call
+                * dev_err() because there's no way to find the gadget
+                * given only ep.
+                */
+               ret = -EINVAL;
+               goto out;
+       }
+
        ret = ep->ops->enable(ep, ep->desc);
        if (ret)
                goto out;
index 8414fac74493f6391cd04c8c317f20e7b11841fc..3d499d93c083b24fa963618e3633da0c8ce6f893 100644 (file)
@@ -48,6 +48,7 @@
 #define DRIVER_VERSION "02 May 2005"
 
 #define POWER_BUDGET   500     /* in mA; use 8 for low-power port testing */
+#define POWER_BUDGET_3 900     /* in mA */
 
 static const char      driver_name[] = "dummy_hcd";
 static const char      driver_desc[] = "USB Host+Gadget Emulator";
@@ -2432,7 +2433,7 @@ static int dummy_start_ss(struct dummy_hcd *dum_hcd)
        dum_hcd->rh_state = DUMMY_RH_RUNNING;
        dum_hcd->stream_en_ep = 0;
        INIT_LIST_HEAD(&dum_hcd->urbp_list);
-       dummy_hcd_to_hcd(dum_hcd)->power_budget = POWER_BUDGET;
+       dummy_hcd_to_hcd(dum_hcd)->power_budget = POWER_BUDGET_3;
        dummy_hcd_to_hcd(dum_hcd)->state = HC_STATE_RUNNING;
        dummy_hcd_to_hcd(dum_hcd)->uses_new_polling = 1;
 #ifdef CONFIG_USB_OTG
index 20141c3096f68ab4a2f51762dc0617715e473633..9a05863b28768a3e8fea85b376935baf961d36e9 100644 (file)
@@ -2576,7 +2576,7 @@ static int fsl_udc_remove(struct platform_device *pdev)
        dma_pool_destroy(udc_controller->td_pool);
        free_irq(udc_controller->irq, udc_controller);
        iounmap(dr_regs);
-       if (pdata->operating_mode == FSL_USB2_DR_DEVICE)
+       if (res && (pdata->operating_mode == FSL_USB2_DR_DEVICE))
                release_mem_region(res->start, resource_size(res));
 
        /* free udc --wait for the release() finished */
index b3e073fb88c6e746fb5ffb9dbabcd531a1b72dfd..bf6c81e2f8cccf9e2d6644d728141e7991749de9 100644 (file)
@@ -1151,7 +1151,7 @@ static void udc_pop_fifo(struct lpc32xx_udc *udc, u8 *data, u32 bytes)
        u32 *p32, tmp, cbytes;
 
        /* Use optimal data transfer method based on source address and size */
-       switch (((u32) data) & 0x3) {
+       switch (((uintptr_t) data) & 0x3) {
        case 0: /* 32-bit aligned */
                p32 = (u32 *) data;
                cbytes = (bytes & ~0x3);
@@ -1177,11 +1177,11 @@ static void udc_pop_fifo(struct lpc32xx_udc *udc, u8 *data, u32 bytes)
                        tmp = readl(USBD_RXDATA(udc->udp_baseaddr));
 
                        bl = bytes - n;
-                       if (bl > 3)
-                               bl = 3;
+                       if (bl > 4)
+                               bl = 4;
 
                        for (i = 0; i < bl; i++)
-                               data[n + i] = (u8) ((tmp >> (n * 8)) & 0xFF);
+                               data[n + i] = (u8) ((tmp >> (i * 8)) & 0xFF);
                }
                break;
 
@@ -1252,7 +1252,7 @@ static void udc_stuff_fifo(struct lpc32xx_udc *udc, u8 *data, u32 bytes)
        u32 *p32, tmp, cbytes;
 
        /* Use optimal data transfer method based on source address and size */
-       switch (((u32) data) & 0x3) {
+       switch (((uintptr_t) data) & 0x3) {
        case 0: /* 32-bit aligned */
                p32 = (u32 *) data;
                cbytes = (bytes & ~0x3);
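
The lpc32xx_udc hunks above make two corrections: the pointer is cast through uintptr_t before the alignment test, so nothing is truncated on 64-bit builds, and the trailing-byte copy from the RX FIFO now accepts up to 4 bytes per 32-bit word and shifts by the byte index i instead of the running offset n. A small self-contained C sketch of both points (plain user-space code with a hypothetical helper name, not the driver itself):

/*
 * Sketch only: alignment test via uintptr_t and unpacking the trailing
 * bytes of a transfer from one 32-bit FIFO word, shifting by i * 8.
 */
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

static void pop_tail(uint8_t *data, size_t done, size_t total, uint32_t word)
{
	size_t bl = total - done;
	size_t i;

	if (bl > 4)		/* one 32-bit word carries at most 4 bytes */
		bl = 4;
	for (i = 0; i < bl; i++)
		data[done + i] = (uint8_t)((word >> (i * 8)) & 0xff);
}

int main(void)
{
	uint8_t buf[7] = { 0 };

	/* uintptr_t is wide enough for a pointer on 32- and 64-bit hosts */
	printf("4-byte aligned? %s\n",
	       (((uintptr_t)buf) & 0x3) == 0 ? "yes" : "no");

	pop_tail(buf, 4, 7, 0x00ccbbaau);
	printf("%02x %02x %02x\n", buf[4], buf[5], buf[6]);	/* aa bb cc */
	return 0;
}
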
index e098f16c01cb5dedef6e4084c33a7ec39f43acdc..33703140233aa592001eaeb64298b5f119da9b0a 100644 (file)
@@ -1544,10 +1544,10 @@ static void usb3_set_device_address(struct renesas_usb3 *usb3, u16 addr)
 static bool usb3_std_req_set_address(struct renesas_usb3 *usb3,
                                     struct usb_ctrlrequest *ctrl)
 {
-       if (ctrl->wValue >= 128)
+       if (le16_to_cpu(ctrl->wValue) >= 128)
                return true;    /* stall */
 
-       usb3_set_device_address(usb3, ctrl->wValue);
+       usb3_set_device_address(usb3, le16_to_cpu(ctrl->wValue));
        usb3_set_p0_con_for_no_data(usb3);
 
        return false;
@@ -1582,6 +1582,7 @@ static bool usb3_std_req_get_status(struct renesas_usb3 *usb3,
        struct renesas_usb3_ep *usb3_ep;
        int num;
        u16 status = 0;
+       __le16 tx_data;
 
        switch (ctrl->bRequestType & USB_RECIP_MASK) {
        case USB_RECIP_DEVICE:
@@ -1604,10 +1605,10 @@ static bool usb3_std_req_get_status(struct renesas_usb3 *usb3,
        }
 
        if (!stall) {
-               status = cpu_to_le16(status);
+               tx_data = cpu_to_le16(status);
                dev_dbg(usb3_to_dev(usb3), "get_status: req = %p\n",
                        usb_req_to_usb3_req(usb3->ep0_req));
-               usb3_pipe0_internal_xfer(usb3, &status, sizeof(status),
+               usb3_pipe0_internal_xfer(usb3, &tx_data, sizeof(tx_data),
                                         usb3_pipe0_get_status_completion);
        }
 
@@ -1772,7 +1773,7 @@ static bool usb3_std_req_set_sel(struct renesas_usb3 *usb3,
 static bool usb3_std_req_set_configuration(struct renesas_usb3 *usb3,
                                           struct usb_ctrlrequest *ctrl)
 {
-       if (ctrl->wValue > 0)
+       if (le16_to_cpu(ctrl->wValue) > 0)
                usb3_set_bit(usb3, USB_COM_CON_CONF, USB3_USB_COM_CON);
        else
                usb3_clear_bit(usb3, USB_COM_CON_CONF, USB3_USB_COM_CON);
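
The renesas_usb3 hunks above convert wValue with le16_to_cpu() before comparing it and transmit the GetStatus reply from a separate __le16 buffer instead of byte-swapping the CPU-order variable in place; the xhci debugfs hunks below apply the same rule when decoding ring and context fields. A minimal user-space sketch of the convention, with hypothetical helpers standing in for the kernel's le16_to_cpu()/cpu_to_le16():

/*
 * Sketch only: control-request fields arrive little-endian on the wire,
 * so convert to CPU order before numeric comparisons, and convert back
 * to little-endian before placing data on the wire.
 */
#include <stdint.h>
#include <stdio.h>

static uint16_t le16_to_cpu_sketch(const uint8_t raw[2])
{
	/* assemble from bytes so the result is host-order on any machine */
	return (uint16_t)(raw[0] | (raw[1] << 8));
}

static void cpu_to_le16_sketch(uint16_t val, uint8_t out[2])
{
	out[0] = val & 0xff;
	out[1] = val >> 8;
}

int main(void)
{
	const uint8_t wire_wvalue[2] = { 0x05, 0x00 };	/* wValue = 5 */
	uint16_t wvalue = le16_to_cpu_sketch(wire_wvalue);
	uint8_t status[2];

	printf("address %u %s\n", wvalue, wvalue < 128 ? "accepted" : "stalled");

	cpu_to_le16_sketch(0x0001, status);	/* e.g. the self-powered bit */
	printf("status on the wire: %02x %02x\n", status[0], status[1]);
	return 0;
}
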
index 7ba6afc7ef230ed69da605ce48e31cac4783a709..76c3f29562d2b9d2e3263f3d3dc1b45b92728140 100644 (file)
@@ -202,10 +202,10 @@ static void xhci_ring_dump_segment(struct seq_file *s,
                trb = &seg->trbs[i];
                dma = seg->dma + i * sizeof(*trb);
                seq_printf(s, "%pad: %s\n", &dma,
-                          xhci_decode_trb(trb->generic.field[0],
-                                          trb->generic.field[1],
-                                          trb->generic.field[2],
-                                          trb->generic.field[3]));
+                          xhci_decode_trb(le32_to_cpu(trb->generic.field[0]),
+                                          le32_to_cpu(trb->generic.field[1]),
+                                          le32_to_cpu(trb->generic.field[2]),
+                                          le32_to_cpu(trb->generic.field[3])));
        }
 }
 
@@ -263,10 +263,10 @@ static int xhci_slot_context_show(struct seq_file *s, void *unused)
        xhci = hcd_to_xhci(bus_to_hcd(dev->udev->bus));
        slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx);
        seq_printf(s, "%pad: %s\n", &dev->out_ctx->dma,
-                  xhci_decode_slot_context(slot_ctx->dev_info,
-                                           slot_ctx->dev_info2,
-                                           slot_ctx->tt_info,
-                                           slot_ctx->dev_state));
+                  xhci_decode_slot_context(le32_to_cpu(slot_ctx->dev_info),
+                                           le32_to_cpu(slot_ctx->dev_info2),
+                                           le32_to_cpu(slot_ctx->tt_info),
+                                           le32_to_cpu(slot_ctx->dev_state)));
 
        return 0;
 }
@@ -286,10 +286,10 @@ static int xhci_endpoint_context_show(struct seq_file *s, void *unused)
                ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, dci);
                dma = dev->out_ctx->dma + dci * CTX_SIZE(xhci->hcc_params);
                seq_printf(s, "%pad: %s\n", &dma,
-                          xhci_decode_ep_context(ep_ctx->ep_info,
-                                                 ep_ctx->ep_info2,
-                                                 ep_ctx->deq,
-                                                 ep_ctx->tx_info));
+                          xhci_decode_ep_context(le32_to_cpu(ep_ctx->ep_info),
+                                                 le32_to_cpu(ep_ctx->ep_info2),
+                                                 le64_to_cpu(ep_ctx->deq),
+                                                 le32_to_cpu(ep_ctx->tx_info)));
        }
 
        return 0;
index f498160df969ef0cfe60d2306dfbf8821b20f140..3351d07c431f1836e25888252f12154761713931 100644 (file)
@@ -57,6 +57,7 @@ static int xhci_create_intel_xhci_sw_pdev(struct xhci_hcd *xhci, u32 cap_offset)
                ret = platform_device_add_properties(pdev, role_switch_props);
                if (ret) {
                        dev_err(dev, "failed to register device properties\n");
+                       platform_device_put(pdev);
                        return ret;
                }
        }
index 9741cdeea9d7aaf772ae2c124dabe6bef6494d87..e7aab31fd9a5a38b260e498947de7808259098d0 100644 (file)
@@ -3202,10 +3202,10 @@ static int xhci_align_td(struct xhci_hcd *xhci, struct urb *urb, u32 enqd_len,
        if (usb_urb_dir_out(urb)) {
                len = sg_pcopy_to_buffer(urb->sg, urb->num_sgs,
                                   seg->bounce_buf, new_buff_len, enqd_len);
-               if (len != seg->bounce_len)
+               if (len != new_buff_len)
                        xhci_warn(xhci,
                                "WARN Wrong bounce buffer write length: %zu != %d\n",
-                               len, seg->bounce_len);
+                               len, new_buff_len);
                seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
                                                 max_pkt, DMA_TO_DEVICE);
        } else {
@@ -3330,6 +3330,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                        if (xhci_urb_suitable_for_idt(urb)) {
                                memcpy(&send_addr, urb->transfer_buffer,
                                       trb_buff_len);
+                               le64_to_cpus(&send_addr);
                                field |= TRB_IDT;
                        }
                }
@@ -3475,6 +3476,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                if (xhci_urb_suitable_for_idt(urb)) {
                        memcpy(&addr, urb->transfer_buffer,
                               urb->transfer_buffer_length);
+                       le64_to_cpus(&addr);
                        field |= TRB_IDT;
                } else {
                        addr = (u64) urb->transfer_dma;
index 5008659756877f28b5bdd2dba0f33ae6cfad532e..6c17e3fe181a2113464d4120984ad1125405888c 100644 (file)
@@ -1032,7 +1032,7 @@ int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup)
        writel(command, &xhci->op_regs->command);
        xhci->broken_suspend = 0;
        if (xhci_handshake(&xhci->op_regs->status,
-                               STS_SAVE, 0, 10 * 1000)) {
+                               STS_SAVE, 0, 20 * 1000)) {
        /*
         * AMD SNPS xHC 3.0 occasionally does not clear the
         * SSS bit of USBSTS and when driver tries to poll
@@ -1108,6 +1108,18 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
                hibernated = true;
 
        if (!hibernated) {
+               /*
+                * Some controllers might lose power during suspend, so wait
+                * for controller not ready bit to clear, just as in xHC init.
+                */
+               retval = xhci_handshake(&xhci->op_regs->status,
+                                       STS_CNR, 0, 10 * 1000 * 1000);
+               if (retval) {
+                       xhci_warn(xhci, "Controller not ready at resume %d\n",
+                                 retval);
+                       spin_unlock_irq(&xhci->lock);
+                       return retval;
+               }
                /* step 1: restore register */
                xhci_restore_registers(xhci);
                /* step 2: initialize command ring buffer */
@@ -3059,6 +3071,48 @@ void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, unsigned int ep_index,
        }
 }
 
+static void xhci_endpoint_disable(struct usb_hcd *hcd,
+                                 struct usb_host_endpoint *host_ep)
+{
+       struct xhci_hcd         *xhci;
+       struct xhci_virt_device *vdev;
+       struct xhci_virt_ep     *ep;
+       struct usb_device       *udev;
+       unsigned long           flags;
+       unsigned int            ep_index;
+
+       xhci = hcd_to_xhci(hcd);
+rescan:
+       spin_lock_irqsave(&xhci->lock, flags);
+
+       udev = (struct usb_device *)host_ep->hcpriv;
+       if (!udev || !udev->slot_id)
+               goto done;
+
+       vdev = xhci->devs[udev->slot_id];
+       if (!vdev)
+               goto done;
+
+       ep_index = xhci_get_endpoint_index(&host_ep->desc);
+       ep = &vdev->eps[ep_index];
+       if (!ep)
+               goto done;
+
+       /* wait for hub_tt_work to finish clearing hub TT */
+       if (ep->ep_state & EP_CLEARING_TT) {
+               spin_unlock_irqrestore(&xhci->lock, flags);
+               schedule_timeout_uninterruptible(1);
+               goto rescan;
+       }
+
+       if (ep->ep_state)
+               xhci_dbg(xhci, "endpoint disable with ep_state 0x%x\n",
+                        ep->ep_state);
+done:
+       host_ep->hcpriv = NULL;
+       spin_unlock_irqrestore(&xhci->lock, flags);
+}
+
 /*
  * Called after usb core issues a clear halt control message.
  * The host side of the halt should already be cleared by a reset endpoint
@@ -3083,6 +3137,7 @@ static void xhci_endpoint_reset(struct usb_hcd *hcd,
        unsigned int ep_index;
        unsigned long flags;
        u32 ep_flag;
+       int err;
 
        xhci = hcd_to_xhci(hcd);
        if (!host_ep->hcpriv)
@@ -3142,7 +3197,17 @@ static void xhci_endpoint_reset(struct usb_hcd *hcd,
                xhci_free_command(xhci, cfg_cmd);
                goto cleanup;
        }
-       xhci_queue_stop_endpoint(xhci, stop_cmd, udev->slot_id, ep_index, 0);
+
+       err = xhci_queue_stop_endpoint(xhci, stop_cmd, udev->slot_id,
+                                       ep_index, 0);
+       if (err < 0) {
+               spin_unlock_irqrestore(&xhci->lock, flags);
+               xhci_free_command(xhci, cfg_cmd);
+               xhci_dbg(xhci, "%s: Failed to queue stop ep command, %d ",
+                               __func__, err);
+               goto cleanup;
+       }
+
        xhci_ring_cmd_db(xhci);
        spin_unlock_irqrestore(&xhci->lock, flags);
 
@@ -3156,8 +3221,16 @@ static void xhci_endpoint_reset(struct usb_hcd *hcd,
                                           ctrl_ctx, ep_flag, ep_flag);
        xhci_endpoint_copy(xhci, cfg_cmd->in_ctx, vdev->out_ctx, ep_index);
 
-       xhci_queue_configure_endpoint(xhci, cfg_cmd, cfg_cmd->in_ctx->dma,
+       err = xhci_queue_configure_endpoint(xhci, cfg_cmd, cfg_cmd->in_ctx->dma,
                                      udev->slot_id, false);
+       if (err < 0) {
+               spin_unlock_irqrestore(&xhci->lock, flags);
+               xhci_free_command(xhci, cfg_cmd);
+               xhci_dbg(xhci, "%s: Failed to queue config ep command, %d ",
+                               __func__, err);
+               goto cleanup;
+       }
+
        xhci_ring_cmd_db(xhci);
        spin_unlock_irqrestore(&xhci->lock, flags);
 
@@ -4674,12 +4747,12 @@ static int xhci_update_timeout_for_endpoint(struct xhci_hcd *xhci,
        alt_timeout = xhci_call_host_update_timeout_for_endpoint(xhci, udev,
                desc, state, timeout);
 
-       /* If we found we can't enable hub-initiated LPM, or
+       /* If we found we can't enable hub-initiated LPM, and
         * the U1 or U2 exit latency was too high to allow
-        * device-initiated LPM as well, just stop searching.
+        * device-initiated LPM as well, then we will disable LPM
+        * for this device, so stop searching any further.
         */
-       if (alt_timeout == USB3_LPM_DISABLED ||
-                       alt_timeout == USB3_LPM_DEVICE_INITIATED) {
+       if (alt_timeout == USB3_LPM_DISABLED) {
                *timeout = alt_timeout;
                return -E2BIG;
        }
@@ -4790,10 +4863,12 @@ static u16 xhci_calculate_lpm_timeout(struct usb_hcd *hcd,
                if (intf->dev.driver) {
                        driver = to_usb_driver(intf->dev.driver);
                        if (driver && driver->disable_hub_initiated_lpm) {
-                               dev_dbg(&udev->dev, "Hub-initiated %s disabled "
-                                               "at request of driver %s\n",
-                                               state_name, driver->name);
-                               return xhci_get_timeout_no_hub_lpm(udev, state);
+                               dev_dbg(&udev->dev, "Hub-initiated %s disabled at request of driver %s\n",
+                                       state_name, driver->name);
+                               timeout = xhci_get_timeout_no_hub_lpm(udev,
+                                                                     state);
+                               if (timeout == USB3_LPM_DISABLED)
+                                       return timeout;
                        }
                }
 
@@ -5077,11 +5152,18 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
                hcd->has_tt = 1;
        } else {
                /*
-                * Some 3.1 hosts return sbrn 0x30, use xhci supported protocol
-                * minor revision instead of sbrn. Minor revision is a two digit
-                * BCD containing minor and sub-minor numbers, only show minor.
+                * Early xHCI 1.1 spec did not mention USB 3.1 capable hosts
+                * should return 0x31 for sbrn, or that the minor revision
+                * is a two digit BCD containing minor and sub-minor numbers.
+                * This was later clarified in xHCI 1.2.
+                *
+                * Some USB 3.1 capable hosts therefore have sbrn 0x30, and
+                * minor revision set to 0x1 instead of 0x10.
                 */
-               minor_rev = xhci->usb3_rhub.min_rev / 0x10;
+               if (xhci->usb3_rhub.min_rev == 0x1)
+                       minor_rev = 1;
+               else
+                       minor_rev = xhci->usb3_rhub.min_rev / 0x10;
 
                switch (minor_rev) {
                case 2:
@@ -5199,11 +5281,12 @@ static void xhci_clear_tt_buffer_complete(struct usb_hcd *hcd,
        unsigned long flags;
 
        xhci = hcd_to_xhci(hcd);
+
+       spin_lock_irqsave(&xhci->lock, flags);
        udev = (struct usb_device *)ep->hcpriv;
        slot_id = udev->slot_id;
        ep_index = xhci_get_endpoint_index(&ep->desc);
 
-       spin_lock_irqsave(&xhci->lock, flags);
        xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_CLEARING_TT;
        xhci_ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
        spin_unlock_irqrestore(&xhci->lock, flags);
@@ -5240,6 +5323,7 @@ static const struct hc_driver xhci_hc_driver = {
        .free_streams =         xhci_free_streams,
        .add_endpoint =         xhci_add_endpoint,
        .drop_endpoint =        xhci_drop_endpoint,
+       .endpoint_disable =     xhci_endpoint_disable,
        .endpoint_reset =       xhci_endpoint_reset,
        .check_bandwidth =      xhci_check_bandwidth,
        .reset_bandwidth =      xhci_reset_bandwidth,
index 0a57c2cc8e5af2ae7dd5e0b3c9a3c0e5191788a3..7a6b122c833fbeaece8a02782ab26581c6480f2f 100644 (file)
@@ -716,6 +716,10 @@ static int mts_usb_probe(struct usb_interface *intf,
 
        }
 
+       if (ep_in_current != &ep_in_set[2]) {
+               MTS_WARNING("couldn't find two input bulk endpoints. Bailing out.\n");
+               return -ENODEV;
+       }
 
        if ( ep_out == -1 ) {
                MTS_WARNING( "couldn't find an output bulk endpoint. Bailing out.\n" );
index bdae62b2ffe0c7c955c2a66f360b41ff6625ba5f..9bce583aada3af2e27af4ec8569ea38aa2e4e96b 100644 (file)
@@ -47,16 +47,6 @@ config USB_SEVSEG
          To compile this driver as a module, choose M here: the
          module will be called usbsevseg.
 
-config USB_RIO500
-       tristate "USB Diamond Rio500 support"
-       help
-         Say Y here if you want to connect a USB Rio500 mp3 player to your
-         computer's USB port. Please read <file:Documentation/usb/rio.rst>
-         for more information.
-
-         To compile this driver as a module, choose M here: the
-         module will be called rio500.
-
 config USB_LEGOTOWER
        tristate "USB Lego Infrared Tower support"
        help
index 109f54f5b9aa4b8ecc69c9f301e6ff3d668cfd93..0d416eb624bbe12247493afa40fe60242086e0d7 100644 (file)
@@ -17,7 +17,6 @@ obj-$(CONFIG_USB_ISIGHTFW)            += isight_firmware.o
 obj-$(CONFIG_USB_LCD)                  += usblcd.o
 obj-$(CONFIG_USB_LD)                   += ldusb.o
 obj-$(CONFIG_USB_LEGOTOWER)            += legousbtower.o
-obj-$(CONFIG_USB_RIO500)               += rio500.o
 obj-$(CONFIG_USB_TEST)                 += usbtest.o
 obj-$(CONFIG_USB_EHSET_TEST_FIXTURE)    += ehset.o
 obj-$(CONFIG_USB_TRANCEVIBRATOR)       += trancevibrator.o
index 344d523b05023047c6fc5efbc9b260ad2f098f05..6f5edb9fc61e024cc9549497c7d2629230b585e0 100644 (file)
@@ -75,6 +75,7 @@ struct adu_device {
        char                    serial_number[8];
 
        int                     open_count; /* number of times this port has been opened */
+       unsigned long           disconnected:1;
 
        char            *read_buffer_primary;
        int                     read_buffer_length;
@@ -116,7 +117,7 @@ static void adu_abort_transfers(struct adu_device *dev)
 {
        unsigned long flags;
 
-       if (dev->udev == NULL)
+       if (dev->disconnected)
                return;
 
        /* shutdown transfer */
@@ -148,6 +149,7 @@ static void adu_delete(struct adu_device *dev)
        kfree(dev->read_buffer_secondary);
        kfree(dev->interrupt_in_buffer);
        kfree(dev->interrupt_out_buffer);
+       usb_put_dev(dev->udev);
        kfree(dev);
 }
 
@@ -243,7 +245,7 @@ static int adu_open(struct inode *inode, struct file *file)
        }
 
        dev = usb_get_intfdata(interface);
-       if (!dev || !dev->udev) {
+       if (!dev) {
                retval = -ENODEV;
                goto exit_no_device;
        }
@@ -326,7 +328,7 @@ static int adu_release(struct inode *inode, struct file *file)
        }
 
        adu_release_internal(dev);
-       if (dev->udev == NULL) {
+       if (dev->disconnected) {
                /* the device was unplugged before the file was released */
                if (!dev->open_count)   /* ... and we're the last user */
                        adu_delete(dev);
@@ -354,7 +356,7 @@ static ssize_t adu_read(struct file *file, __user char *buffer, size_t count,
                return -ERESTARTSYS;
 
        /* verify that the device wasn't unplugged */
-       if (dev->udev == NULL) {
+       if (dev->disconnected) {
                retval = -ENODEV;
                pr_err("No device or device unplugged %d\n", retval);
                goto exit;
@@ -518,7 +520,7 @@ static ssize_t adu_write(struct file *file, const __user char *buffer,
                goto exit_nolock;
 
        /* verify that the device wasn't unplugged */
-       if (dev->udev == NULL) {
+       if (dev->disconnected) {
                retval = -ENODEV;
                pr_err("No device or device unplugged %d\n", retval);
                goto exit;
@@ -663,7 +665,7 @@ static int adu_probe(struct usb_interface *interface,
 
        mutex_init(&dev->mtx);
        spin_lock_init(&dev->buflock);
-       dev->udev = udev;
+       dev->udev = usb_get_dev(udev);
        init_waitqueue_head(&dev->read_wait);
        init_waitqueue_head(&dev->write_wait);
 
@@ -762,14 +764,18 @@ static void adu_disconnect(struct usb_interface *interface)
 
        dev = usb_get_intfdata(interface);
 
-       mutex_lock(&dev->mtx);  /* not interruptible */
-       dev->udev = NULL;       /* poison */
        usb_deregister_dev(interface, &adu_class);
-       mutex_unlock(&dev->mtx);
+
+       usb_poison_urb(dev->interrupt_in_urb);
+       usb_poison_urb(dev->interrupt_out_urb);
 
        mutex_lock(&adutux_mutex);
        usb_set_intfdata(interface, NULL);
 
+       mutex_lock(&dev->mtx);  /* not interruptible */
+       dev->disconnected = 1;
+       mutex_unlock(&dev->mtx);
+
        /* if the device is not opened, then we clean up right now */
        if (!dev->open_count)
                adu_delete(dev);
index cf5828ce927a835af30de0dd739d0ce7e455a2db..34e6cd6f40d3005fbda6dc428be6c7e19fccf6bb 100644 (file)
@@ -98,6 +98,7 @@ static void chaoskey_free(struct chaoskey *dev)
                usb_free_urb(dev->urb);
                kfree(dev->name);
                kfree(dev->buf);
+               usb_put_intf(dev->interface);
                kfree(dev);
        }
 }
@@ -145,6 +146,8 @@ static int chaoskey_probe(struct usb_interface *interface,
        if (dev == NULL)
                goto out;
 
+       dev->interface = usb_get_intf(interface);
+
        dev->buf = kmalloc(size, GFP_KERNEL);
 
        if (dev->buf == NULL)
@@ -174,8 +177,6 @@ static int chaoskey_probe(struct usb_interface *interface,
                        goto out;
        }
 
-       dev->interface = interface;
-
        dev->in_ep = in_ep;
 
        if (le16_to_cpu(udev->descriptor.idVendor) != ALEA_VENDOR_ID)
index f5bed9f29e5621521b4ea754ed76a5442744492d..dce44fbf031fb2875498632dba484fe3866ad045 100644 (file)
@@ -54,11 +54,7 @@ MODULE_AUTHOR(DRIVER_AUTHOR);
 MODULE_DESCRIPTION(DRIVER_DESC);
 MODULE_LICENSE("GPL");
 
-/* Module parameters */
-static DEFINE_MUTEX(iowarrior_mutex);
-
 static struct usb_driver iowarrior_driver;
-static DEFINE_MUTEX(iowarrior_open_disc_lock);
 
 /*--------------*/
 /*     data     */
@@ -87,6 +83,7 @@ struct iowarrior {
        char chip_serial[9];            /* the serial number string of the chip connected */
        int report_size;                /* number of bytes in a report */
        u16 product_id;
+       struct usb_anchor submitted;
 };
 
 /*--------------*/
@@ -243,6 +240,7 @@ static inline void iowarrior_delete(struct iowarrior *dev)
        kfree(dev->int_in_buffer);
        usb_free_urb(dev->int_in_urb);
        kfree(dev->read_queue);
+       usb_put_intf(dev->interface);
        kfree(dev);
 }
 
@@ -424,11 +422,13 @@ static ssize_t iowarrior_write(struct file *file,
                        retval = -EFAULT;
                        goto error;
                }
+               usb_anchor_urb(int_out_urb, &dev->submitted);
                retval = usb_submit_urb(int_out_urb, GFP_KERNEL);
                if (retval) {
                        dev_dbg(&dev->interface->dev,
                                "submit error %d for urb nr.%d\n",
                                retval, atomic_read(&dev->write_busy));
+                       usb_unanchor_urb(int_out_urb);
                        goto error;
                }
                /* submit was ok */
@@ -477,8 +477,6 @@ static long iowarrior_ioctl(struct file *file, unsigned int cmd,
        if (!buffer)
                return -ENOMEM;
 
-       /* lock this object */
-       mutex_lock(&iowarrior_mutex);
        mutex_lock(&dev->mutex);
 
        /* verify that the device wasn't unplugged */
@@ -571,7 +569,6 @@ static long iowarrior_ioctl(struct file *file, unsigned int cmd,
 error_out:
        /* unlock the device */
        mutex_unlock(&dev->mutex);
-       mutex_unlock(&iowarrior_mutex);
        kfree(buffer);
        return retval;
 }
@@ -586,27 +583,20 @@ static int iowarrior_open(struct inode *inode, struct file *file)
        int subminor;
        int retval = 0;
 
-       mutex_lock(&iowarrior_mutex);
        subminor = iminor(inode);
 
        interface = usb_find_interface(&iowarrior_driver, subminor);
        if (!interface) {
-               mutex_unlock(&iowarrior_mutex);
-               printk(KERN_ERR "%s - error, can't find device for minor %d\n",
+               pr_err("%s - error, can't find device for minor %d\n",
                       __func__, subminor);
                return -ENODEV;
        }
 
-       mutex_lock(&iowarrior_open_disc_lock);
        dev = usb_get_intfdata(interface);
-       if (!dev) {
-               mutex_unlock(&iowarrior_open_disc_lock);
-               mutex_unlock(&iowarrior_mutex);
+       if (!dev)
                return -ENODEV;
-       }
 
        mutex_lock(&dev->mutex);
-       mutex_unlock(&iowarrior_open_disc_lock);
 
        /* Only one process can open each device, no sharing. */
        if (dev->opened) {
@@ -628,7 +618,6 @@ static int iowarrior_open(struct inode *inode, struct file *file)
 
 out:
        mutex_unlock(&dev->mutex);
-       mutex_unlock(&iowarrior_mutex);
        return retval;
 }
 
@@ -764,11 +753,13 @@ static int iowarrior_probe(struct usb_interface *interface,
        init_waitqueue_head(&dev->write_wait);
 
        dev->udev = udev;
-       dev->interface = interface;
+       dev->interface = usb_get_intf(interface);
 
        iface_desc = interface->cur_altsetting;
        dev->product_id = le16_to_cpu(udev->descriptor.idProduct);
 
+       init_usb_anchor(&dev->submitted);
+
        res = usb_find_last_int_in_endpoint(iface_desc, &dev->int_in_endpoint);
        if (res) {
                dev_err(&interface->dev, "no interrupt-in endpoint found\n");
@@ -836,7 +827,6 @@ static int iowarrior_probe(struct usb_interface *interface,
        if (retval) {
                /* something prevented us from registering this driver */
                dev_err(&interface->dev, "Not able to get a minor for this device.\n");
-               usb_set_intfdata(interface, NULL);
                goto error;
        }
 
@@ -860,26 +850,15 @@ error:
  */
 static void iowarrior_disconnect(struct usb_interface *interface)
 {
-       struct iowarrior *dev;
-       int minor;
-
-       dev = usb_get_intfdata(interface);
-       mutex_lock(&iowarrior_open_disc_lock);
-       usb_set_intfdata(interface, NULL);
-       /* prevent device read, write and ioctl */
-       dev->present = 0;
-
-       minor = dev->minor;
-       mutex_unlock(&iowarrior_open_disc_lock);
-       /* give back our minor - this will call close() locks need to be dropped at this point*/
+       struct iowarrior *dev = usb_get_intfdata(interface);
+       int minor = dev->minor;
 
        usb_deregister_dev(interface, &iowarrior_class);
 
        mutex_lock(&dev->mutex);
 
        /* prevent device read, write and ioctl */
-
-       mutex_unlock(&dev->mutex);
+       dev->present = 0;
 
        if (dev->opened) {
                /* There is a process that holds a filedescriptor to the device ,
@@ -887,10 +866,13 @@ static void iowarrior_disconnect(struct usb_interface *interface)
                   Deleting the device is postponed until close() was called.
                 */
                usb_kill_urb(dev->int_in_urb);
+               usb_kill_anchored_urbs(&dev->submitted);
                wake_up_interruptible(&dev->read_wait);
                wake_up_interruptible(&dev->write_wait);
+               mutex_unlock(&dev->mutex);
        } else {
                /* no process is using the device, cleanup now */
+               mutex_unlock(&dev->mutex);
                iowarrior_delete(dev);
        }
 
index 6581774bdfa4929611ba605d693970229eba3efb..8f86b4ebca898b32e572cba6006ca01ab414b2ea 100644 (file)
@@ -153,6 +153,7 @@ MODULE_PARM_DESC(min_interrupt_out_interval, "Minimum interrupt out interval in
 struct ld_usb {
        struct mutex            mutex;          /* locks this structure */
        struct usb_interface    *intf;          /* save off the usb interface pointer */
+       unsigned long           disconnected:1;
 
        int                     open_count;     /* number of times this port has been opened */
 
@@ -192,12 +193,10 @@ static void ld_usb_abort_transfers(struct ld_usb *dev)
        /* shutdown transfer */
        if (dev->interrupt_in_running) {
                dev->interrupt_in_running = 0;
-               if (dev->intf)
-                       usb_kill_urb(dev->interrupt_in_urb);
+               usb_kill_urb(dev->interrupt_in_urb);
        }
        if (dev->interrupt_out_busy)
-               if (dev->intf)
-                       usb_kill_urb(dev->interrupt_out_urb);
+               usb_kill_urb(dev->interrupt_out_urb);
 }
 
 /**
@@ -205,8 +204,6 @@ static void ld_usb_abort_transfers(struct ld_usb *dev)
  */
 static void ld_usb_delete(struct ld_usb *dev)
 {
-       ld_usb_abort_transfers(dev);
-
        /* free data structures */
        usb_free_urb(dev->interrupt_in_urb);
        usb_free_urb(dev->interrupt_out_urb);
@@ -263,7 +260,7 @@ static void ld_usb_interrupt_in_callback(struct urb *urb)
 
 resubmit:
        /* resubmit if we're still running */
-       if (dev->interrupt_in_running && !dev->buffer_overflow && dev->intf) {
+       if (dev->interrupt_in_running && !dev->buffer_overflow) {
                retval = usb_submit_urb(dev->interrupt_in_urb, GFP_ATOMIC);
                if (retval) {
                        dev_err(&dev->intf->dev,
@@ -383,16 +380,13 @@ static int ld_usb_release(struct inode *inode, struct file *file)
                goto exit;
        }
 
-       if (mutex_lock_interruptible(&dev->mutex)) {
-               retval = -ERESTARTSYS;
-               goto exit;
-       }
+       mutex_lock(&dev->mutex);
 
        if (dev->open_count != 1) {
                retval = -ENODEV;
                goto unlock_exit;
        }
-       if (dev->intf == NULL) {
+       if (dev->disconnected) {
                /* the device was unplugged before the file was released */
                mutex_unlock(&dev->mutex);
                /* unlock here as ld_usb_delete frees dev */
@@ -423,7 +417,7 @@ static __poll_t ld_usb_poll(struct file *file, poll_table *wait)
 
        dev = file->private_data;
 
-       if (!dev->intf)
+       if (dev->disconnected)
                return EPOLLERR | EPOLLHUP;
 
        poll_wait(file, &dev->read_wait, wait);
@@ -462,7 +456,7 @@ static ssize_t ld_usb_read(struct file *file, char __user *buffer, size_t count,
        }
 
        /* verify that the device wasn't unplugged */
-       if (dev->intf == NULL) {
+       if (dev->disconnected) {
                retval = -ENODEV;
                printk(KERN_ERR "ldusb: No device or device unplugged %d\n", retval);
                goto unlock_exit;
@@ -470,7 +464,7 @@ static ssize_t ld_usb_read(struct file *file, char __user *buffer, size_t count,
 
        /* wait for data */
        spin_lock_irq(&dev->rbsl);
-       if (dev->ring_head == dev->ring_tail) {
+       while (dev->ring_head == dev->ring_tail) {
                dev->interrupt_in_done = 0;
                spin_unlock_irq(&dev->rbsl);
                if (file->f_flags & O_NONBLOCK) {
@@ -480,15 +474,20 @@ static ssize_t ld_usb_read(struct file *file, char __user *buffer, size_t count,
                retval = wait_event_interruptible(dev->read_wait, dev->interrupt_in_done);
                if (retval < 0)
                        goto unlock_exit;
-       } else {
-               spin_unlock_irq(&dev->rbsl);
+
+               spin_lock_irq(&dev->rbsl);
        }
+       spin_unlock_irq(&dev->rbsl);
 
        /* actual_buffer contains actual_length + interrupt_in_buffer */
        actual_buffer = (size_t *)(dev->ring_buffer + dev->ring_tail * (sizeof(size_t)+dev->interrupt_in_endpoint_size));
+       if (*actual_buffer > dev->interrupt_in_endpoint_size) {
+               retval = -EIO;
+               goto unlock_exit;
+       }
        bytes_to_read = min(count, *actual_buffer);
        if (bytes_to_read < *actual_buffer)
-               dev_warn(&dev->intf->dev, "Read buffer overflow, %zd bytes dropped\n",
+               dev_warn(&dev->intf->dev, "Read buffer overflow, %zu bytes dropped\n",
                         *actual_buffer-bytes_to_read);
 
        /* copy one interrupt_in_buffer from ring_buffer into userspace */
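
Editor's note: two fixes are visible in the hunk above. The wait for data becomes a while loop so the ring-buffer state is rechecked under dev->rbsl after every wakeup, and the length word stored in the ring slot is validated against the endpoint size before it is used as a copy length. A condensed sketch of the pattern; slot_len is a stand-in for the size_t word at the head of each ring slot, not a real ldusb identifier:

/* Sketch, not the ldusb code: wait until the producer has filled the ring. */
spin_lock_irq(&dev->rbsl);
while (dev->ring_head == dev->ring_tail) {
	dev->interrupt_in_done = 0;
	spin_unlock_irq(&dev->rbsl);

	if (file->f_flags & O_NONBLOCK)
		return -EAGAIN;
	if (wait_event_interruptible(dev->read_wait, dev->interrupt_in_done))
		return -ERESTARTSYS;

	spin_lock_irq(&dev->rbsl);	/* re-take the lock and re-test */
}
spin_unlock_irq(&dev->rbsl);

/* the stored length came from the device's URB, so never trust it blindly */
if (*slot_len > dev->interrupt_in_endpoint_size)
	return -EIO;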
@@ -496,11 +495,11 @@ static ssize_t ld_usb_read(struct file *file, char __user *buffer, size_t count,
                retval = -EFAULT;
                goto unlock_exit;
        }
-       dev->ring_tail = (dev->ring_tail+1) % ring_buffer_size;
-
        retval = bytes_to_read;
 
        spin_lock_irq(&dev->rbsl);
+       dev->ring_tail = (dev->ring_tail + 1) % ring_buffer_size;
+
        if (dev->buffer_overflow) {
                dev->buffer_overflow = 0;
                spin_unlock_irq(&dev->rbsl);
@@ -542,7 +541,7 @@ static ssize_t ld_usb_write(struct file *file, const char __user *buffer,
        }
 
        /* verify that the device wasn't unplugged */
-       if (dev->intf == NULL) {
+       if (dev->disconnected) {
                retval = -ENODEV;
                printk(KERN_ERR "ldusb: No device or device unplugged %d\n", retval);
                goto unlock_exit;
@@ -563,8 +562,9 @@ static ssize_t ld_usb_write(struct file *file, const char __user *buffer,
        /* write the data into interrupt_out_buffer from userspace */
        bytes_to_write = min(count, write_buffer_size*dev->interrupt_out_endpoint_size);
        if (bytes_to_write < count)
-               dev_warn(&dev->intf->dev, "Write buffer overflow, %zd bytes dropped\n", count-bytes_to_write);
-       dev_dbg(&dev->intf->dev, "%s: count = %zd, bytes_to_write = %zd\n",
+               dev_warn(&dev->intf->dev, "Write buffer overflow, %zu bytes dropped\n",
+                       count - bytes_to_write);
+       dev_dbg(&dev->intf->dev, "%s: count = %zu, bytes_to_write = %zu\n",
                __func__, count, bytes_to_write);
 
        if (copy_from_user(dev->interrupt_out_buffer, buffer, bytes_to_write)) {
@@ -581,7 +581,7 @@ static ssize_t ld_usb_write(struct file *file, const char __user *buffer,
                                         1 << 8, 0,
                                         dev->interrupt_out_buffer,
                                         bytes_to_write,
-                                        USB_CTRL_SET_TIMEOUT * HZ);
+                                        USB_CTRL_SET_TIMEOUT);
                if (retval < 0)
                        dev_err(&dev->intf->dev,
                                "Couldn't submit HID_REQ_SET_REPORT %d\n",
@@ -696,10 +696,9 @@ static int ld_usb_probe(struct usb_interface *intf, const struct usb_device_id *
                dev_warn(&intf->dev, "Interrupt out endpoint not found (using control endpoint instead)\n");
 
        dev->interrupt_in_endpoint_size = usb_endpoint_maxp(dev->interrupt_in_endpoint);
-       dev->ring_buffer =
-               kmalloc_array(ring_buffer_size,
-                             sizeof(size_t) + dev->interrupt_in_endpoint_size,
-                             GFP_KERNEL);
+       dev->ring_buffer = kcalloc(ring_buffer_size,
+                       sizeof(size_t) + dev->interrupt_in_endpoint_size,
+                       GFP_KERNEL);
        if (!dev->ring_buffer)
                goto error;
        dev->interrupt_in_buffer = kmalloc(dev->interrupt_in_endpoint_size, GFP_KERNEL);
@@ -764,6 +763,9 @@ static void ld_usb_disconnect(struct usb_interface *intf)
        /* give back our minor */
        usb_deregister_dev(intf, &ld_usb_class);
 
+       usb_poison_urb(dev->interrupt_in_urb);
+       usb_poison_urb(dev->interrupt_out_urb);
+
        mutex_lock(&dev->mutex);
 
        /* if the device is not opened, then we clean up right now */
@@ -771,7 +773,7 @@ static void ld_usb_disconnect(struct usb_interface *intf)
                mutex_unlock(&dev->mutex);
                ld_usb_delete(dev);
        } else {
-               dev->intf = NULL;
+               dev->disconnected = 1;
                /* wake up pollers */
                wake_up_interruptible_all(&dev->read_wait);
                wake_up_interruptible_all(&dev->write_wait);
index 006cf13b21999f58e512579fa62ee399907dba6b..23061f1526b4e03be818de50add926e982187ae2 100644 (file)
@@ -179,7 +179,6 @@ static const struct usb_device_id tower_table[] = {
 };
 
 MODULE_DEVICE_TABLE (usb, tower_table);
-static DEFINE_MUTEX(open_disc_mutex);
 
 #define LEGO_USB_TOWER_MINOR_BASE      160
 
@@ -191,6 +190,7 @@ struct lego_usb_tower {
        unsigned char           minor;          /* the starting minor number for this device */
 
        int                     open_count;     /* number of times this port has been opened */
+       unsigned long           disconnected:1;
 
        char*                   read_buffer;
        size_t                  read_buffer_length; /* this much came in */
@@ -290,14 +290,13 @@ static inline void lego_usb_tower_debug_data(struct device *dev,
  */
 static inline void tower_delete (struct lego_usb_tower *dev)
 {
-       tower_abort_transfers (dev);
-
        /* free data structures */
        usb_free_urb(dev->interrupt_in_urb);
        usb_free_urb(dev->interrupt_out_urb);
        kfree (dev->read_buffer);
        kfree (dev->interrupt_in_buffer);
        kfree (dev->interrupt_out_buffer);
+       usb_put_dev(dev->udev);
        kfree (dev);
 }
 
@@ -332,18 +331,14 @@ static int tower_open (struct inode *inode, struct file *file)
                goto exit;
        }
 
-       mutex_lock(&open_disc_mutex);
        dev = usb_get_intfdata(interface);
-
        if (!dev) {
-               mutex_unlock(&open_disc_mutex);
                retval = -ENODEV;
                goto exit;
        }
 
        /* lock this device */
        if (mutex_lock_interruptible(&dev->lock)) {
-               mutex_unlock(&open_disc_mutex);
                retval = -ERESTARTSYS;
                goto exit;
        }
@@ -351,12 +346,9 @@ static int tower_open (struct inode *inode, struct file *file)
 
        /* allow opening only once */
        if (dev->open_count) {
-               mutex_unlock(&open_disc_mutex);
                retval = -EBUSY;
                goto unlock_exit;
        }
-       dev->open_count = 1;
-       mutex_unlock(&open_disc_mutex);
 
        /* reset the tower */
        result = usb_control_msg (dev->udev,
@@ -396,13 +388,14 @@ static int tower_open (struct inode *inode, struct file *file)
                dev_err(&dev->udev->dev,
                        "Couldn't submit interrupt_in_urb %d\n", retval);
                dev->interrupt_in_running = 0;
-               dev->open_count = 0;
                goto unlock_exit;
        }
 
        /* save device in the file's private structure */
        file->private_data = dev;
 
+       dev->open_count = 1;
+
 unlock_exit:
        mutex_unlock(&dev->lock);
 
@@ -423,22 +416,19 @@ static int tower_release (struct inode *inode, struct file *file)
 
        if (dev == NULL) {
                retval = -ENODEV;
-               goto exit_nolock;
-       }
-
-       mutex_lock(&open_disc_mutex);
-       if (mutex_lock_interruptible(&dev->lock)) {
-               retval = -ERESTARTSYS;
                goto exit;
        }
 
+       mutex_lock(&dev->lock);
+
        if (dev->open_count != 1) {
                dev_dbg(&dev->udev->dev, "%s: device not opened exactly once\n",
                        __func__);
                retval = -ENODEV;
                goto unlock_exit;
        }
-       if (dev->udev == NULL) {
+
+       if (dev->disconnected) {
                /* the device was unplugged before the file was released */
 
                /* unlock here as tower_delete frees dev */
@@ -456,10 +446,7 @@ static int tower_release (struct inode *inode, struct file *file)
 
 unlock_exit:
        mutex_unlock(&dev->lock);
-
 exit:
-       mutex_unlock(&open_disc_mutex);
-exit_nolock:
        return retval;
 }
 
@@ -477,10 +464,9 @@ static void tower_abort_transfers (struct lego_usb_tower *dev)
        if (dev->interrupt_in_running) {
                dev->interrupt_in_running = 0;
                mb();
-               if (dev->udev)
-                       usb_kill_urb (dev->interrupt_in_urb);
+               usb_kill_urb(dev->interrupt_in_urb);
        }
-       if (dev->interrupt_out_busy && dev->udev)
+       if (dev->interrupt_out_busy)
                usb_kill_urb(dev->interrupt_out_urb);
 }
 
@@ -516,7 +502,7 @@ static __poll_t tower_poll (struct file *file, poll_table *wait)
 
        dev = file->private_data;
 
-       if (!dev->udev)
+       if (dev->disconnected)
                return EPOLLERR | EPOLLHUP;
 
        poll_wait(file, &dev->read_wait, wait);
@@ -563,7 +549,7 @@ static ssize_t tower_read (struct file *file, char __user *buffer, size_t count,
        }
 
        /* verify that the device wasn't unplugged */
-       if (dev->udev == NULL) {
+       if (dev->disconnected) {
                retval = -ENODEV;
                pr_err("No device or device unplugged %d\n", retval);
                goto unlock_exit;
@@ -649,7 +635,7 @@ static ssize_t tower_write (struct file *file, const char __user *buffer, size_t
        }
 
        /* verify that the device wasn't unplugged */
-       if (dev->udev == NULL) {
+       if (dev->disconnected) {
                retval = -ENODEV;
                pr_err("No device or device unplugged %d\n", retval);
                goto unlock_exit;
@@ -759,7 +745,7 @@ static void tower_interrupt_in_callback (struct urb *urb)
 
 resubmit:
        /* resubmit if we're still running */
-       if (dev->interrupt_in_running && dev->udev) {
+       if (dev->interrupt_in_running) {
                retval = usb_submit_urb (dev->interrupt_in_urb, GFP_ATOMIC);
                if (retval)
                        dev_err(&dev->udev->dev,
@@ -822,8 +808,9 @@ static int tower_probe (struct usb_interface *interface, const struct usb_device
 
        mutex_init(&dev->lock);
 
-       dev->udev = udev;
+       dev->udev = usb_get_dev(udev);
        dev->open_count = 0;
+       dev->disconnected = 0;
 
        dev->read_buffer = NULL;
        dev->read_buffer_length = 0;
@@ -891,8 +878,10 @@ static int tower_probe (struct usb_interface *interface, const struct usb_device
                                  get_version_reply,
                                  sizeof(*get_version_reply),
                                  1000);
-       if (result < 0) {
-               dev_err(idev, "LEGO USB Tower get version control request failed\n");
+       if (result != sizeof(*get_version_reply)) {
+               if (result >= 0)
+                       result = -EIO;
+               dev_err(idev, "get version request failed: %d\n", result);
                retval = result;
                goto error;
        }
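
Editor's note: usb_control_msg() returns the number of bytes actually transferred on success, so a short read of the version reply used to pass silently and leave the buffer partly uninitialised; the hunk above treats anything other than sizeof(*get_version_reply) as a failure and maps non-negative short results to -EIO. The general pattern, with placeholder request parameters and a placeholder reply structure:

/* Pattern: treat a short control-IN transfer as an I/O error. */
result = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
			 request, USB_TYPE_VENDOR | USB_DIR_IN,
			 value, index, reply, sizeof(*reply), 1000);
if (result != sizeof(*reply)) {
	if (result >= 0)
		result = -EIO;		/* short transfer: keep a negative errno */
	return result;
}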
@@ -910,7 +899,6 @@ static int tower_probe (struct usb_interface *interface, const struct usb_device
        if (retval) {
                /* something prevented us from registering this driver */
                dev_err(idev, "Not able to get a minor for this device.\n");
-               usb_set_intfdata (interface, NULL);
                goto error;
        }
        dev->minor = interface->minor;
@@ -942,23 +930,24 @@ static void tower_disconnect (struct usb_interface *interface)
        int minor;
 
        dev = usb_get_intfdata (interface);
-       mutex_lock(&open_disc_mutex);
-       usb_set_intfdata (interface, NULL);
 
        minor = dev->minor;
 
-       /* give back our minor */
+       /* give back our minor and prevent further open() */
        usb_deregister_dev (interface, &tower_class);
 
+       /* stop I/O */
+       usb_poison_urb(dev->interrupt_in_urb);
+       usb_poison_urb(dev->interrupt_out_urb);
+
        mutex_lock(&dev->lock);
-       mutex_unlock(&open_disc_mutex);
 
        /* if the device is not opened, then we clean up right now */
        if (!dev->open_count) {
                mutex_unlock(&dev->lock);
                tower_delete (dev);
        } else {
-               dev->udev = NULL;
+               dev->disconnected = 1;
                /* wake up pollers */
                wake_up_interruptible_all(&dev->read_wait);
                wake_up_interruptible_all(&dev->write_wait);
diff --git a/drivers/usb/misc/rio500.c b/drivers/usb/misc/rio500.c
deleted file mode 100644 (file)
index 30cae5e..0000000
+++ /dev/null
@@ -1,554 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/* -*- linux-c -*- */
-
-/* 
- * Driver for USB Rio 500
- *
- * Cesar Miquel (miquel@df.uba.ar)
- * 
- * based on hp_scanner.c by David E. Nelson (dnelson@jump.net)
- *
- * Based upon mouse.c (Brad Keryan) and printer.c (Michael Gee).
- *
- * Changelog:
- * 30/05/2003  replaced lock/unlock kernel with up/down
- *             Daniele Bellucci  bellucda@tiscali.it
- * */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/signal.h>
-#include <linux/sched/signal.h>
-#include <linux/mutex.h>
-#include <linux/errno.h>
-#include <linux/random.h>
-#include <linux/poll.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/usb.h>
-#include <linux/wait.h>
-
-#include "rio500_usb.h"
-
-#define DRIVER_AUTHOR "Cesar Miquel <miquel@df.uba.ar>"
-#define DRIVER_DESC "USB Rio 500 driver"
-
-#define RIO_MINOR      64
-
-/* stall/wait timeout for rio */
-#define NAK_TIMEOUT (HZ)
-
-#define IBUF_SIZE 0x1000
-
-/* Size of the rio buffer */
-#define OBUF_SIZE 0x10000
-
-struct rio_usb_data {
-        struct usb_device *rio_dev;     /* init: probe_rio */
-        unsigned int ifnum;             /* Interface number of the USB device */
-        int isopen;                     /* nz if open */
-        int present;                    /* Device is present on the bus */
-        char *obuf, *ibuf;              /* transfer buffers */
-        char bulk_in_ep, bulk_out_ep;   /* Endpoint assignments */
-        wait_queue_head_t wait_q;       /* for timeouts */
-};
-
-static DEFINE_MUTEX(rio500_mutex);
-static struct rio_usb_data rio_instance;
-
-static int open_rio(struct inode *inode, struct file *file)
-{
-       struct rio_usb_data *rio = &rio_instance;
-
-       /* against disconnect() */
-       mutex_lock(&rio500_mutex);
-
-       if (rio->isopen || !rio->present) {
-               mutex_unlock(&rio500_mutex);
-               return -EBUSY;
-       }
-       rio->isopen = 1;
-
-       init_waitqueue_head(&rio->wait_q);
-
-
-       dev_info(&rio->rio_dev->dev, "Rio opened.\n");
-       mutex_unlock(&rio500_mutex);
-
-       return 0;
-}
-
-static int close_rio(struct inode *inode, struct file *file)
-{
-       struct rio_usb_data *rio = &rio_instance;
-
-       /* against disconnect() */
-       mutex_lock(&rio500_mutex);
-
-       rio->isopen = 0;
-       if (!rio->present) {
-               /* cleanup has been delayed */
-               kfree(rio->ibuf);
-               kfree(rio->obuf);
-               rio->ibuf = NULL;
-               rio->obuf = NULL;
-       } else {
-               dev_info(&rio->rio_dev->dev, "Rio closed.\n");
-       }
-       mutex_unlock(&rio500_mutex);
-       return 0;
-}
-
-static long ioctl_rio(struct file *file, unsigned int cmd, unsigned long arg)
-{
-       struct RioCommand rio_cmd;
-       struct rio_usb_data *rio = &rio_instance;
-       void __user *data;
-       unsigned char *buffer;
-       int result, requesttype;
-       int retries;
-       int retval=0;
-
-       mutex_lock(&rio500_mutex);
-        /* Sanity check to make sure rio is connected, powered, etc */
-        if (rio->present == 0 || rio->rio_dev == NULL) {
-               retval = -ENODEV;
-               goto err_out;
-       }
-
-       switch (cmd) {
-       case RIO_RECV_COMMAND:
-               data = (void __user *) arg;
-               if (data == NULL)
-                       break;
-               if (copy_from_user(&rio_cmd, data, sizeof(struct RioCommand))) {
-                       retval = -EFAULT;
-                       goto err_out;
-               }
-               if (rio_cmd.length < 0 || rio_cmd.length > PAGE_SIZE) {
-                       retval = -EINVAL;
-                       goto err_out;
-               }
-               buffer = (unsigned char *) __get_free_page(GFP_KERNEL);
-               if (buffer == NULL) {
-                       retval = -ENOMEM;
-                       goto err_out;
-               }
-               if (copy_from_user(buffer, rio_cmd.buffer, rio_cmd.length)) {
-                       retval = -EFAULT;
-                       free_page((unsigned long) buffer);
-                       goto err_out;
-               }
-
-               requesttype = rio_cmd.requesttype | USB_DIR_IN |
-                   USB_TYPE_VENDOR | USB_RECIP_DEVICE;
-               dev_dbg(&rio->rio_dev->dev,
-                       "sending command:reqtype=%0x req=%0x value=%0x index=%0x len=%0x\n",
-                       requesttype, rio_cmd.request, rio_cmd.value,
-                       rio_cmd.index, rio_cmd.length);
-               /* Send rio control message */
-               retries = 3;
-               while (retries) {
-                       result = usb_control_msg(rio->rio_dev,
-                                                usb_rcvctrlpipe(rio-> rio_dev, 0),
-                                                rio_cmd.request,
-                                                requesttype,
-                                                rio_cmd.value,
-                                                rio_cmd.index, buffer,
-                                                rio_cmd.length,
-                                                jiffies_to_msecs(rio_cmd.timeout));
-                       if (result == -ETIMEDOUT)
-                               retries--;
-                       else if (result < 0) {
-                               dev_err(&rio->rio_dev->dev,
-                                       "Error executing ioctrl. code = %d\n",
-                                       result);
-                               retries = 0;
-                       } else {
-                               dev_dbg(&rio->rio_dev->dev,
-                                       "Executed ioctl. Result = %d (data=%02x)\n",
-                                       result, buffer[0]);
-                               if (copy_to_user(rio_cmd.buffer, buffer,
-                                                rio_cmd.length)) {
-                                       free_page((unsigned long) buffer);
-                                       retval = -EFAULT;
-                                       goto err_out;
-                               }
-                               retries = 0;
-                       }
-
-                       /* rio_cmd.buffer contains a raw stream of single byte
-                          data which has been returned from rio.  Data is
-                          interpreted at application level.  For data that
-                          will be cast to data types longer than 1 byte, data
-                          will be little_endian and will potentially need to
-                          be swapped at the app level */
-
-               }
-               free_page((unsigned long) buffer);
-               break;
-
-       case RIO_SEND_COMMAND:
-               data = (void __user *) arg;
-               if (data == NULL)
-                       break;
-               if (copy_from_user(&rio_cmd, data, sizeof(struct RioCommand))) {
-                       retval = -EFAULT;
-                       goto err_out;
-               }
-               if (rio_cmd.length < 0 || rio_cmd.length > PAGE_SIZE) {
-                       retval = -EINVAL;
-                       goto err_out;
-               }
-               buffer = (unsigned char *) __get_free_page(GFP_KERNEL);
-               if (buffer == NULL) {
-                       retval = -ENOMEM;
-                       goto err_out;
-               }
-               if (copy_from_user(buffer, rio_cmd.buffer, rio_cmd.length)) {
-                       free_page((unsigned long)buffer);
-                       retval = -EFAULT;
-                       goto err_out;
-               }
-
-               requesttype = rio_cmd.requesttype | USB_DIR_OUT |
-                   USB_TYPE_VENDOR | USB_RECIP_DEVICE;
-               dev_dbg(&rio->rio_dev->dev,
-                       "sending command: reqtype=%0x req=%0x value=%0x index=%0x len=%0x\n",
-                       requesttype, rio_cmd.request, rio_cmd.value,
-                       rio_cmd.index, rio_cmd.length);
-               /* Send rio control message */
-               retries = 3;
-               while (retries) {
-                       result = usb_control_msg(rio->rio_dev,
-                                                usb_sndctrlpipe(rio-> rio_dev, 0),
-                                                rio_cmd.request,
-                                                requesttype,
-                                                rio_cmd.value,
-                                                rio_cmd.index, buffer,
-                                                rio_cmd.length,
-                                                jiffies_to_msecs(rio_cmd.timeout));
-                       if (result == -ETIMEDOUT)
-                               retries--;
-                       else if (result < 0) {
-                               dev_err(&rio->rio_dev->dev,
-                                       "Error executing ioctrl. code = %d\n",
-                                       result);
-                               retries = 0;
-                       } else {
-                               dev_dbg(&rio->rio_dev->dev,
-                                       "Executed ioctl. Result = %d\n", result);
-                               retries = 0;
-
-                       }
-
-               }
-               free_page((unsigned long) buffer);
-               break;
-
-       default:
-               retval = -ENOTTY;
-               break;
-       }
-
-
-err_out:
-       mutex_unlock(&rio500_mutex);
-       return retval;
-}
-
-static ssize_t
-write_rio(struct file *file, const char __user *buffer,
-         size_t count, loff_t * ppos)
-{
-       DEFINE_WAIT(wait);
-       struct rio_usb_data *rio = &rio_instance;
-
-       unsigned long copy_size;
-       unsigned long bytes_written = 0;
-       unsigned int partial;
-
-       int result = 0;
-       int maxretry;
-       int errn = 0;
-       int intr;
-
-       intr = mutex_lock_interruptible(&rio500_mutex);
-       if (intr)
-               return -EINTR;
-        /* Sanity check to make sure rio is connected, powered, etc */
-        if (rio->present == 0 || rio->rio_dev == NULL) {
-               mutex_unlock(&rio500_mutex);
-               return -ENODEV;
-       }
-
-
-
-       do {
-               unsigned long thistime;
-               char *obuf = rio->obuf;
-
-               thistime = copy_size =
-                   (count >= OBUF_SIZE) ? OBUF_SIZE : count;
-               if (copy_from_user(rio->obuf, buffer, copy_size)) {
-                       errn = -EFAULT;
-                       goto error;
-               }
-               maxretry = 5;
-               while (thistime) {
-                       if (!rio->rio_dev) {
-                               errn = -ENODEV;
-                               goto error;
-                       }
-                       if (signal_pending(current)) {
-                               mutex_unlock(&rio500_mutex);
-                               return bytes_written ? bytes_written : -EINTR;
-                       }
-
-                       result = usb_bulk_msg(rio->rio_dev,
-                                        usb_sndbulkpipe(rio->rio_dev, 2),
-                                        obuf, thistime, &partial, 5000);
-
-                       dev_dbg(&rio->rio_dev->dev,
-                               "write stats: result:%d thistime:%lu partial:%u\n",
-                               result, thistime, partial);
-
-                       if (result == -ETIMEDOUT) {     /* NAK - so hold for a while */
-                               if (!maxretry--) {
-                                       errn = -ETIME;
-                                       goto error;
-                               }
-                               prepare_to_wait(&rio->wait_q, &wait, TASK_INTERRUPTIBLE);
-                               schedule_timeout(NAK_TIMEOUT);
-                               finish_wait(&rio->wait_q, &wait);
-                               continue;
-                       } else if (!result && partial) {
-                               obuf += partial;
-                               thistime -= partial;
-                       } else
-                               break;
-               }
-               if (result) {
-                       dev_err(&rio->rio_dev->dev, "Write Whoops - %x\n",
-                               result);
-                       errn = -EIO;
-                       goto error;
-               }
-               bytes_written += copy_size;
-               count -= copy_size;
-               buffer += copy_size;
-       } while (count > 0);
-
-       mutex_unlock(&rio500_mutex);
-
-       return bytes_written ? bytes_written : -EIO;
-
-error:
-       mutex_unlock(&rio500_mutex);
-       return errn;
-}
-
-static ssize_t
-read_rio(struct file *file, char __user *buffer, size_t count, loff_t * ppos)
-{
-       DEFINE_WAIT(wait);
-       struct rio_usb_data *rio = &rio_instance;
-       ssize_t read_count;
-       unsigned int partial;
-       int this_read;
-       int result;
-       int maxretry = 10;
-       char *ibuf;
-       int intr;
-
-       intr = mutex_lock_interruptible(&rio500_mutex);
-       if (intr)
-               return -EINTR;
-       /* Sanity check to make sure rio is connected, powered, etc */
-        if (rio->present == 0 || rio->rio_dev == NULL) {
-               mutex_unlock(&rio500_mutex);
-               return -ENODEV;
-       }
-
-       ibuf = rio->ibuf;
-
-       read_count = 0;
-
-
-       while (count > 0) {
-               if (signal_pending(current)) {
-                       mutex_unlock(&rio500_mutex);
-                       return read_count ? read_count : -EINTR;
-               }
-               if (!rio->rio_dev) {
-                       mutex_unlock(&rio500_mutex);
-                       return -ENODEV;
-               }
-               this_read = (count >= IBUF_SIZE) ? IBUF_SIZE : count;
-
-               result = usb_bulk_msg(rio->rio_dev,
-                                     usb_rcvbulkpipe(rio->rio_dev, 1),
-                                     ibuf, this_read, &partial,
-                                     8000);
-
-               dev_dbg(&rio->rio_dev->dev,
-                       "read stats: result:%d this_read:%u partial:%u\n",
-                       result, this_read, partial);
-
-               if (partial) {
-                       count = this_read = partial;
-               } else if (result == -ETIMEDOUT || result == 15) {      /* FIXME: 15 ??? */
-                       if (!maxretry--) {
-                               mutex_unlock(&rio500_mutex);
-                               dev_err(&rio->rio_dev->dev,
-                                       "read_rio: maxretry timeout\n");
-                               return -ETIME;
-                       }
-                       prepare_to_wait(&rio->wait_q, &wait, TASK_INTERRUPTIBLE);
-                       schedule_timeout(NAK_TIMEOUT);
-                       finish_wait(&rio->wait_q, &wait);
-                       continue;
-               } else if (result != -EREMOTEIO) {
-                       mutex_unlock(&rio500_mutex);
-                       dev_err(&rio->rio_dev->dev,
-                               "Read Whoops - result:%d partial:%u this_read:%u\n",
-                               result, partial, this_read);
-                       return -EIO;
-               } else {
-                       mutex_unlock(&rio500_mutex);
-                       return (0);
-               }
-
-               if (this_read) {
-                       if (copy_to_user(buffer, ibuf, this_read)) {
-                               mutex_unlock(&rio500_mutex);
-                               return -EFAULT;
-                       }
-                       count -= this_read;
-                       read_count += this_read;
-                       buffer += this_read;
-               }
-       }
-       mutex_unlock(&rio500_mutex);
-       return read_count;
-}
-
-static const struct file_operations usb_rio_fops = {
-       .owner =        THIS_MODULE,
-       .read =         read_rio,
-       .write =        write_rio,
-       .unlocked_ioctl = ioctl_rio,
-       .open =         open_rio,
-       .release =      close_rio,
-       .llseek =       noop_llseek,
-};
-
-static struct usb_class_driver usb_rio_class = {
-       .name =         "rio500%d",
-       .fops =         &usb_rio_fops,
-       .minor_base =   RIO_MINOR,
-};
-
-static int probe_rio(struct usb_interface *intf,
-                    const struct usb_device_id *id)
-{
-       struct usb_device *dev = interface_to_usbdev(intf);
-       struct rio_usb_data *rio = &rio_instance;
-       int retval = -ENOMEM;
-       char *ibuf, *obuf;
-
-       if (rio->present) {
-               dev_info(&intf->dev, "Second USB Rio at address %d refused\n", dev->devnum);
-               return -EBUSY;
-       }
-       dev_info(&intf->dev, "USB Rio found at address %d\n", dev->devnum);
-
-       obuf = kmalloc(OBUF_SIZE, GFP_KERNEL);
-       if (!obuf) {
-               dev_err(&dev->dev,
-                       "probe_rio: Not enough memory for the output buffer\n");
-               goto err_obuf;
-       }
-       dev_dbg(&intf->dev, "obuf address: %p\n", obuf);
-
-       ibuf = kmalloc(IBUF_SIZE, GFP_KERNEL);
-       if (!ibuf) {
-               dev_err(&dev->dev,
-                       "probe_rio: Not enough memory for the input buffer\n");
-               goto err_ibuf;
-       }
-       dev_dbg(&intf->dev, "ibuf address: %p\n", ibuf);
-
-       mutex_lock(&rio500_mutex);
-       rio->rio_dev = dev;
-       rio->ibuf = ibuf;
-       rio->obuf = obuf;
-       rio->present = 1;
-       mutex_unlock(&rio500_mutex);
-
-       retval = usb_register_dev(intf, &usb_rio_class);
-       if (retval) {
-               dev_err(&dev->dev,
-                       "Not able to get a minor for this device.\n");
-               goto err_register;
-       }
-
-       usb_set_intfdata(intf, rio);
-       return retval;
-
- err_register:
-       mutex_lock(&rio500_mutex);
-       rio->present = 0;
-       mutex_unlock(&rio500_mutex);
- err_ibuf:
-       kfree(obuf);
- err_obuf:
-       return retval;
-}
-
-static void disconnect_rio(struct usb_interface *intf)
-{
-       struct rio_usb_data *rio = usb_get_intfdata (intf);
-
-       usb_set_intfdata (intf, NULL);
-       if (rio) {
-               usb_deregister_dev(intf, &usb_rio_class);
-
-               mutex_lock(&rio500_mutex);
-               if (rio->isopen) {
-                       rio->isopen = 0;
-                       /* better let it finish - the release will do whats needed */
-                       rio->rio_dev = NULL;
-                       mutex_unlock(&rio500_mutex);
-                       return;
-               }
-               kfree(rio->ibuf);
-               kfree(rio->obuf);
-
-               dev_info(&intf->dev, "USB Rio disconnected.\n");
-
-               rio->present = 0;
-               mutex_unlock(&rio500_mutex);
-       }
-}
-
-static const struct usb_device_id rio_table[] = {
-       { USB_DEVICE(0x0841, 1) },              /* Rio 500 */
-       { }                                     /* Terminating entry */
-};
-
-MODULE_DEVICE_TABLE (usb, rio_table);
-
-static struct usb_driver rio_driver = {
-       .name =         "rio500",
-       .probe =        probe_rio,
-       .disconnect =   disconnect_rio,
-       .id_table =     rio_table,
-};
-
-module_usb_driver(rio_driver);
-
-MODULE_AUTHOR( DRIVER_AUTHOR );
-MODULE_DESCRIPTION( DRIVER_DESC );
-MODULE_LICENSE("GPL");
-
diff --git a/drivers/usb/misc/rio500_usb.h b/drivers/usb/misc/rio500_usb.h
deleted file mode 100644 (file)
index 6db7a58..0000000
+++ /dev/null
@@ -1,20 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*  ----------------------------------------------------------------------
-    Copyright (C) 2000  Cesar Miquel  (miquel@df.uba.ar)
-    ---------------------------------------------------------------------- */
-
-#define RIO_SEND_COMMAND                       0x1
-#define RIO_RECV_COMMAND                       0x2
-
-#define RIO_DIR_OUT                            0x0
-#define RIO_DIR_IN                             0x1
-
-struct RioCommand {
-       short length;
-       int request;
-       int requesttype;
-       int value;
-       int index;
-       void __user *buffer;
-       int timeout;
-};
index 9ba4a4e68d9140f450d4e58c51b96068bc716100..61e9e987fe4a94ba3841936e5adfa7aacc868fe9 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/slab.h>
 #include <linux/errno.h>
 #include <linux/mutex.h>
+#include <linux/rwsem.h>
 #include <linux/uaccess.h>
 #include <linux/usb.h>
 
 #define IOCTL_GET_DRV_VERSION  2
 
 
-static DEFINE_MUTEX(lcd_mutex);
 static const struct usb_device_id id_table[] = {
        { .idVendor = 0x10D2, .match_flags = USB_DEVICE_ID_MATCH_VENDOR, },
        { },
 };
 MODULE_DEVICE_TABLE(usb, id_table);
 
-static DEFINE_MUTEX(open_disc_mutex);
-
-
 struct usb_lcd {
        struct usb_device       *udev;                  /* init: probe_lcd */
        struct usb_interface    *interface;             /* the interface for
@@ -57,6 +54,8 @@ struct usb_lcd {
                                                           using up all RAM */
        struct usb_anchor       submitted;              /* URBs to wait for
                                                           before suspend */
+       struct rw_semaphore     io_rwsem;
+       unsigned long           disconnected:1;
 };
 #define to_lcd_dev(d) container_of(d, struct usb_lcd, kref)
 
@@ -81,40 +80,29 @@ static int lcd_open(struct inode *inode, struct file *file)
        struct usb_interface *interface;
        int subminor, r;
 
-       mutex_lock(&lcd_mutex);
        subminor = iminor(inode);
 
        interface = usb_find_interface(&lcd_driver, subminor);
        if (!interface) {
-               mutex_unlock(&lcd_mutex);
-               printk(KERN_ERR "USBLCD: %s - error, can't find device for minor %d\n",
+               pr_err("USBLCD: %s - error, can't find device for minor %d\n",
                       __func__, subminor);
                return -ENODEV;
        }
 
-       mutex_lock(&open_disc_mutex);
        dev = usb_get_intfdata(interface);
-       if (!dev) {
-               mutex_unlock(&open_disc_mutex);
-               mutex_unlock(&lcd_mutex);
-               return -ENODEV;
-       }
 
        /* increment our usage count for the device */
        kref_get(&dev->kref);
-       mutex_unlock(&open_disc_mutex);
 
        /* grab a power reference */
        r = usb_autopm_get_interface(interface);
        if (r < 0) {
                kref_put(&dev->kref, lcd_delete);
-               mutex_unlock(&lcd_mutex);
                return r;
        }
 
        /* save our object in the file's private structure */
        file->private_data = dev;
-       mutex_unlock(&lcd_mutex);
 
        return 0;
 }
@@ -142,6 +130,13 @@ static ssize_t lcd_read(struct file *file, char __user * buffer,
 
        dev = file->private_data;
 
+       down_read(&dev->io_rwsem);
+
+       if (dev->disconnected) {
+               retval = -ENODEV;
+               goto out_up_io;
+       }
+
        /* do a blocking bulk read to get data from the device */
        retval = usb_bulk_msg(dev->udev,
                              usb_rcvbulkpipe(dev->udev,
@@ -158,6 +153,9 @@ static ssize_t lcd_read(struct file *file, char __user * buffer,
                        retval = bytes_read;
        }
 
+out_up_io:
+       up_read(&dev->io_rwsem);
+
        return retval;
 }
 
@@ -173,14 +171,12 @@ static long lcd_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 
        switch (cmd) {
        case IOCTL_GET_HARD_VERSION:
-               mutex_lock(&lcd_mutex);
                bcdDevice = le16_to_cpu((dev->udev)->descriptor.bcdDevice);
                sprintf(buf, "%1d%1d.%1d%1d",
                        (bcdDevice & 0xF000)>>12,
                        (bcdDevice & 0xF00)>>8,
                        (bcdDevice & 0xF0)>>4,
                        (bcdDevice & 0xF));
-               mutex_unlock(&lcd_mutex);
                if (copy_to_user((void __user *)arg, buf, strlen(buf)) != 0)
                        return -EFAULT;
                break;
@@ -237,11 +233,18 @@ static ssize_t lcd_write(struct file *file, const char __user * user_buffer,
        if (r < 0)
                return -EINTR;
 
+       down_read(&dev->io_rwsem);
+
+       if (dev->disconnected) {
+               retval = -ENODEV;
+               goto err_up_io;
+       }
+
        /* create a urb, and a buffer for it, and copy the data to the urb */
        urb = usb_alloc_urb(0, GFP_KERNEL);
        if (!urb) {
                retval = -ENOMEM;
-               goto err_no_buf;
+               goto err_up_io;
        }
 
        buf = usb_alloc_coherent(dev->udev, count, GFP_KERNEL,
@@ -278,6 +281,7 @@ static ssize_t lcd_write(struct file *file, const char __user * user_buffer,
           the USB core will eventually free it entirely */
        usb_free_urb(urb);
 
+       up_read(&dev->io_rwsem);
 exit:
        return count;
 error_unanchor:
@@ -285,7 +289,8 @@ error_unanchor:
 error:
        usb_free_coherent(dev->udev, count, buf, urb->transfer_dma);
        usb_free_urb(urb);
-err_no_buf:
+err_up_io:
+       up_read(&dev->io_rwsem);
        up(&dev->limit_sem);
        return retval;
 }
@@ -325,6 +330,7 @@ static int lcd_probe(struct usb_interface *interface,
 
        kref_init(&dev->kref);
        sema_init(&dev->limit_sem, USB_LCD_CONCURRENT_WRITES);
+       init_rwsem(&dev->io_rwsem);
        init_usb_anchor(&dev->submitted);
 
        dev->udev = usb_get_dev(interface_to_usbdev(interface));
@@ -365,7 +371,6 @@ static int lcd_probe(struct usb_interface *interface,
                /* something prevented us from registering this driver */
                dev_err(&interface->dev,
                        "Not able to get a minor for this device.\n");
-               usb_set_intfdata(interface, NULL);
                goto error;
        }
 
@@ -411,17 +416,18 @@ static int lcd_resume(struct usb_interface *intf)
 
 static void lcd_disconnect(struct usb_interface *interface)
 {
-       struct usb_lcd *dev;
+       struct usb_lcd *dev = usb_get_intfdata(interface);
        int minor = interface->minor;
 
-       mutex_lock(&open_disc_mutex);
-       dev = usb_get_intfdata(interface);
-       usb_set_intfdata(interface, NULL);
-       mutex_unlock(&open_disc_mutex);
-
        /* give back our minor */
        usb_deregister_dev(interface, &lcd_class);
 
+       down_write(&dev->io_rwsem);
+       dev->disconnected = 1;
+       up_write(&dev->io_rwsem);
+
+       usb_kill_anchored_urbs(&dev->submitted);
+
        /* decrement our usage count */
        kref_put(&dev->kref, lcd_delete);
 
index 6715a128e6c8b29bd52cb6c132b5eb0ecd459209..be0505b8b5d4e5515996faf25ef7485e978c9248 100644 (file)
@@ -60,6 +60,7 @@ struct usb_yurex {
 
        struct kref             kref;
        struct mutex            io_mutex;
+       unsigned long           disconnected:1;
        struct fasync_struct    *async_queue;
        wait_queue_head_t       waitq;
 
@@ -107,6 +108,7 @@ static void yurex_delete(struct kref *kref)
                                dev->int_buffer, dev->urb->transfer_dma);
                usb_free_urb(dev->urb);
        }
+       usb_put_intf(dev->interface);
        usb_put_dev(dev->udev);
        kfree(dev);
 }
@@ -132,6 +134,7 @@ static void yurex_interrupt(struct urb *urb)
        switch (status) {
        case 0: /*success*/
                break;
+       /* The device is terminated or messed up, give up */
        case -EOVERFLOW:
                dev_err(&dev->interface->dev,
                        "%s - overflow with length %d, actual length is %d\n",
@@ -140,12 +143,13 @@ static void yurex_interrupt(struct urb *urb)
        case -ENOENT:
        case -ESHUTDOWN:
        case -EILSEQ:
-               /* The device is terminated, clean up */
+       case -EPROTO:
+       case -ETIME:
                return;
        default:
                dev_err(&dev->interface->dev,
                        "%s - unknown status received: %d\n", __func__, status);
-               goto exit;
+               return;
        }
 
        /* handle received message */
@@ -177,7 +181,6 @@ static void yurex_interrupt(struct urb *urb)
                break;
        }
 
-exit:
        retval = usb_submit_urb(dev->urb, GFP_ATOMIC);
        if (retval) {
                dev_err(&dev->interface->dev, "%s - usb_submit_urb failed: %d\n",
@@ -204,7 +207,7 @@ static int yurex_probe(struct usb_interface *interface, const struct usb_device_
        init_waitqueue_head(&dev->waitq);
 
        dev->udev = usb_get_dev(interface_to_usbdev(interface));
-       dev->interface = interface;
+       dev->interface = usb_get_intf(interface);
 
        /* set up the endpoint information */
        iface_desc = interface->cur_altsetting;
@@ -315,8 +318,9 @@ static void yurex_disconnect(struct usb_interface *interface)
 
        /* prevent more I/O from starting */
        usb_poison_urb(dev->urb);
+       usb_poison_urb(dev->cntl_urb);
        mutex_lock(&dev->io_mutex);
-       dev->interface = NULL;
+       dev->disconnected = 1;
        mutex_unlock(&dev->io_mutex);
 
        /* wakeup waiters */
@@ -404,7 +408,7 @@ static ssize_t yurex_read(struct file *file, char __user *buffer, size_t count,
        dev = file->private_data;
 
        mutex_lock(&dev->io_mutex);
-       if (!dev->interface) {          /* already disconnected */
+       if (dev->disconnected) {                /* already disconnected */
                mutex_unlock(&dev->io_mutex);
                return -ENODEV;
        }
@@ -439,7 +443,7 @@ static ssize_t yurex_write(struct file *file, const char __user *user_buffer,
                goto error;
 
        mutex_lock(&dev->io_mutex);
-       if (!dev->interface) {          /* already disconnected */
+       if (dev->disconnected) {                /* already disconnected */
                mutex_unlock(&dev->io_mutex);
                retval = -ENODEV;
                goto error;
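
Editor's note: the yurex interrupt handler now bails out on -EPROTO and -ETIME and no longer resubmits after an unknown status, which matters when a misbehaving or just-unplugged device keeps failing: resubmitting from the completion handler would otherwise spin indefinitely. A generic sketch of the status triage; the exact set of codes a driver treats as fatal is driver-specific, and mydev_* is a placeholder:

/* Sketch: completion-handler status triage before resubmitting. */
static void mydev_interrupt(struct urb *urb)
{
	struct mydev *dev = urb->context;
	int retval;

	switch (urb->status) {
	case 0:				/* success: handle the data below */
		break;
	case -ECONNRESET:
	case -ENOENT:
	case -ESHUTDOWN:
	case -EILSEQ:
	case -EPROTO:
	case -ETIME:
		return;			/* unplugged or dead: do not resubmit */
	default:
		dev_err(&dev->interface->dev, "unexpected urb status %d\n",
			urb->status);
		return;			/* unknown error: also give up */
	}

	/* ... consume urb->transfer_buffer ... */

	retval = usb_submit_urb(urb, GFP_ATOMIC);
	if (retval)
		dev_err(&dev->interface->dev, "resubmit failed: %d\n", retval);
}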
index c3d5c1206eec808bb96d54ef5cc713219c39c93e..9dd02160cca97e9242aac125d104df80cd72e3e3 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/platform_device.h>
 
 #include "mtu3.h"
+#include "mtu3_dr.h"
 #include "mtu3_debug.h"
 #include "mtu3_trace.h"
 
index 4c3de777ef6cf691abd606d0b98081c6f10143ce..a3c30b609433bacca844f3d0c0c174c6301e0d51 100644 (file)
@@ -162,17 +162,17 @@ void usbhs_usbreq_get_val(struct usbhs_priv *priv, struct usb_ctrlrequest *req)
        req->bRequest           = (val >> 8) & 0xFF;
        req->bRequestType       = (val >> 0) & 0xFF;
 
-       req->wValue     = usbhs_read(priv, USBVAL);
-       req->wIndex     = usbhs_read(priv, USBINDX);
-       req->wLength    = usbhs_read(priv, USBLENG);
+       req->wValue     = cpu_to_le16(usbhs_read(priv, USBVAL));
+       req->wIndex     = cpu_to_le16(usbhs_read(priv, USBINDX));
+       req->wLength    = cpu_to_le16(usbhs_read(priv, USBLENG));
 }
 
 void usbhs_usbreq_set_val(struct usbhs_priv *priv, struct usb_ctrlrequest *req)
 {
        usbhs_write(priv, USBREQ,  (req->bRequest << 8) | req->bRequestType);
-       usbhs_write(priv, USBVAL,  req->wValue);
-       usbhs_write(priv, USBINDX, req->wIndex);
-       usbhs_write(priv, USBLENG, req->wLength);
+       usbhs_write(priv, USBVAL,  le16_to_cpu(req->wValue));
+       usbhs_write(priv, USBINDX, le16_to_cpu(req->wIndex));
+       usbhs_write(priv, USBLENG, le16_to_cpu(req->wLength));
 
        usbhs_bset(priv, DCPCTR, SUREQ, SUREQ);
 }
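
Editor's note: struct usb_ctrlrequest stores wValue/wIndex/wLength as __le16, while usbhs_read()/usbhs_write() work with CPU-order register values, so each direction needs an explicit conversion; on big-endian kernels the old code byte-swapped the SETUP fields. A standalone illustration of the same conversion discipline (generic code, not the usbhs register accessors):

#include <linux/usb/ch9.h>
#include <asm/byteorder.h>

/* Fill a SETUP packet from CPU-order values: the struct fields are __le16. */
static void fill_setup(struct usb_ctrlrequest *req, u16 value, u16 index,
		       u16 length)
{
	req->wValue  = cpu_to_le16(value);
	req->wIndex  = cpu_to_le16(index);
	req->wLength = cpu_to_le16(length);
}

/* And convert back to CPU order before doing arithmetic or comparisons. */
static u16 setup_length(const struct usb_ctrlrequest *req)
{
	return le16_to_cpu(req->wLength);
}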
index d1a0a35ecfff32b5768d4840da16ee7d45610945..0824099b905e8935c3e9909399eab68c03517f71 100644 (file)
@@ -211,6 +211,7 @@ struct usbhs_priv;
 /* DCPCTR */
 #define BSTS           (1 << 15)       /* Buffer Status */
 #define SUREQ          (1 << 14)       /* Sending SETUP Token */
+#define INBUFM         (1 << 14)       /* (PIPEnCTR) Transfer Buffer Monitor */
 #define CSSTS          (1 << 12)       /* CSSTS Status */
 #define        ACLRM           (1 << 9)        /* Buffer Auto-Clear Mode */
 #define SQCLR          (1 << 8)        /* Toggle Bit Clear */
index 2a01ceb71641f672906f2f9f6863ee6835450f45..86637cd066cfb878718c0918b83883911bcb02b0 100644 (file)
@@ -89,7 +89,7 @@ static void __usbhsf_pkt_del(struct usbhs_pkt *pkt)
        list_del_init(&pkt->node);
 }
 
-static struct usbhs_pkt *__usbhsf_pkt_get(struct usbhs_pipe *pipe)
+struct usbhs_pkt *__usbhsf_pkt_get(struct usbhs_pipe *pipe)
 {
        return list_first_entry_or_null(&pipe->list, struct usbhs_pkt, node);
 }
index 88d1816bcda22c8d989c7665cfe1fc8478ef1ae9..c3d3cc35cee0f640784d48b5f3a471a9608ac6cf 100644 (file)
@@ -97,5 +97,6 @@ void usbhs_pkt_push(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt,
                    void *buf, int len, int zero, int sequence);
 struct usbhs_pkt *usbhs_pkt_pop(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt);
 void usbhs_pkt_start(struct usbhs_pipe *pipe);
+struct usbhs_pkt *__usbhsf_pkt_get(struct usbhs_pipe *pipe);
 
 #endif /* RENESAS_USB_FIFO_H */
index 4d571a5205e24fb61183e1d381b3cc3b830e73a8..cd38d74b322320db3c9607f4ab30758d701949be 100644 (file)
@@ -265,7 +265,7 @@ static int usbhsg_recip_handler_std_set_device(struct usbhs_priv *priv,
        case USB_DEVICE_TEST_MODE:
                usbhsg_recip_handler_std_control_done(priv, uep, ctrl);
                udelay(100);
-               usbhs_sys_set_test_mode(priv, le16_to_cpu(ctrl->wIndex >> 8));
+               usbhs_sys_set_test_mode(priv, le16_to_cpu(ctrl->wIndex) >> 8);
                break;
        default:
                usbhsg_recip_handler_std_control_done(priv, uep, ctrl);
@@ -315,7 +315,7 @@ static void __usbhsg_recip_send_status(struct usbhsg_gpriv *gpriv,
        struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(dcp);
        struct device *dev = usbhsg_gpriv_to_dev(gpriv);
        struct usb_request *req;
-       unsigned short *buf;
+       __le16 *buf;
 
        /* alloc new usb_request for recip */
        req = usb_ep_alloc_request(&dcp->ep, GFP_ATOMIC);
@@ -722,8 +722,7 @@ static int __usbhsg_ep_set_halt_wedge(struct usb_ep *ep, int halt, int wedge)
        struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv);
        struct device *dev = usbhsg_gpriv_to_dev(gpriv);
        unsigned long flags;
-
-       usbhsg_pipe_disable(uep);
+       int ret = 0;
 
        dev_dbg(dev, "set halt %d (pipe %d)\n",
                halt, usbhs_pipe_number(pipe));
@@ -731,6 +730,18 @@ static int __usbhsg_ep_set_halt_wedge(struct usb_ep *ep, int halt, int wedge)
        /********************  spin lock ********************/
        usbhs_lock(priv, flags);
 
+       /*
+        * According to usb_ep_set_halt()'s description, this function should
+        * return -EAGAIN if the IN endpoint has any queue or data. Note
+        * that the usbhs_pipe_is_dir_in() returns false if the pipe is an
+        * IN endpoint in the gadget mode.
+        */
+       if (!usbhs_pipe_is_dir_in(pipe) && (__usbhsf_pkt_get(pipe) ||
+           usbhs_pipe_contains_transmittable_data(pipe))) {
+               ret = -EAGAIN;
+               goto out;
+       }
+
        if (halt)
                usbhs_pipe_stall(pipe);
        else
@@ -741,10 +752,11 @@ static int __usbhsg_ep_set_halt_wedge(struct usb_ep *ep, int halt, int wedge)
        else
                usbhsg_status_clr(gpriv, USBHSG_STATUS_WEDGE);
 
+out:
        usbhs_unlock(priv, flags);
        /********************  spin unlock ******************/
 
-       return 0;
+       return ret;
 }
 
 static int usbhsg_ep_set_halt(struct usb_ep *ep, int value)
index c4922b96c93bcec16e3b60010eed556306914d6c..9e5afdde1adbf8263e9a72bc67b8b7dea034085b 100644 (file)
@@ -277,6 +277,21 @@ int usbhs_pipe_is_accessible(struct usbhs_pipe *pipe)
        return -EBUSY;
 }
 
+bool usbhs_pipe_contains_transmittable_data(struct usbhs_pipe *pipe)
+{
+       u16 val;
+
+       /* Do not support for DCP pipe */
+       if (usbhs_pipe_is_dcp(pipe))
+               return false;
+
+       val = usbhsp_pipectrl_get(pipe);
+       if (val & INBUFM)
+               return true;
+
+       return false;
+}
+
 /*
  *             PID ctrl
  */
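
Editor's note: taken together, the gadget.c and pipe.c hunks implement the usb_ep_set_halt() contract that halting an IN endpoint must fail with -EAGAIN while requests are still queued or data is still sitting in the FIFO; the new usbhs_pipe_contains_transmittable_data() supplies the "data still in the hardware buffer" half of that test via the INBUFM bit. From the caller's side the contract looks like this hypothetical function-driver fragment ("ep" is an IN endpoint owned by the function driver):

/* Sketch: a function driver reacting to -EAGAIN from usb_ep_set_halt(). */
int ret = usb_ep_set_halt(ep);
if (ret == -EAGAIN) {
	/* requests pending or FIFO not yet empty: retry after the
	 * queued transfers complete */
	pr_debug("deferring STALL on %s until the IN queue drains\n", ep->name);
} else if (ret) {
	pr_err("failed to halt %s: %d\n", ep->name, ret);
}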
index 3080423e600c7ec6777a950fde20823a72bfe17b..3b130529408ba9991aa4ef5d1f83c4040323eac1 100644 (file)
@@ -83,6 +83,7 @@ void usbhs_pipe_clear(struct usbhs_pipe *pipe);
 void usbhs_pipe_clear_without_sequence(struct usbhs_pipe *pipe,
                                       int needs_bfre, int bfre_enable);
 int usbhs_pipe_is_accessible(struct usbhs_pipe *pipe);
+bool usbhs_pipe_contains_transmittable_data(struct usbhs_pipe *pipe);
 void usbhs_pipe_enable(struct usbhs_pipe *pipe);
 void usbhs_pipe_disable(struct usbhs_pipe *pipe);
 void usbhs_pipe_stall(struct usbhs_pipe *pipe);
index f0688c44b04c329d7df99ebfe9e2a9a8ed7b6527..25e81faf4c24aa35efc2ca4fa3c533d6054a30d8 100644 (file)
@@ -1030,6 +1030,9 @@ static const struct usb_device_id id_table_combined[] = {
        /* EZPrototypes devices */
        { USB_DEVICE(EZPROTOTYPES_VID, HJELMSLUND_USB485_ISO_PID) },
        { USB_DEVICE_INTERFACE_NUMBER(UNJO_VID, UNJO_ISODEBUG_V1_PID, 1) },
+       /* Sienna devices */
+       { USB_DEVICE(FTDI_VID, FTDI_SIENNA_PID) },
+       { USB_DEVICE(ECHELON_VID, ECHELON_U20_PID) },
        { }                                     /* Terminating entry */
 };
 
index f12d806220b4a9c1d32abeaa350f4c705f98a509..22d66217cb412b99689ecc18f517066433fc337b 100644 (file)
@@ -39,6 +39,9 @@
 
 #define FTDI_LUMEL_PD12_PID    0x6002
 
+/* Sienna Serial Interface by Secyourit GmbH */
+#define FTDI_SIENNA_PID                0x8348
+
 /* Cyber Cortex AV by Fabulous Silicon (http://fabuloussilicon.com) */
 #define CYBER_CORTEX_AV_PID    0x8698
 
 #define BANDB_TTL3USB9M_PID    0xAC50
 #define BANDB_ZZ_PROG1_USB_PID 0xBA02
 
+/*
+ * Echelon USB Serial Interface
+ */
+#define ECHELON_VID            0x0920
+#define ECHELON_U20_PID                0x7500
+
 /*
  * Intrepid Control Systems (http://www.intrepidcs.com/) ValueCAN and NeoVI
  */
index d34779fe4a8d046e616cdb2e32c3258cda0c218f..e66a59ef43a1cb59cfa0d41b8af8c7316ededa69 100644 (file)
@@ -1741,8 +1741,8 @@ static struct urb *keyspan_setup_urb(struct usb_serial *serial, int endpoint,
 
        ep_desc = find_ep(serial, endpoint);
        if (!ep_desc) {
-               /* leak the urb, something's wrong and the callers don't care */
-               return urb;
+               usb_free_urb(urb);
+               return NULL;
        }
        if (usb_endpoint_xfer_int(ep_desc)) {
                ep_type_name = "INT";
index 38e920ac7f82092f996279c0918c88538f932175..06ab016be0b67c63b75da45eaa3452288ed0d1cb 100644 (file)
@@ -419,6 +419,7 @@ static void option_instat_callback(struct urb *urb);
 #define CINTERION_PRODUCT_PH8_AUDIO            0x0083
 #define CINTERION_PRODUCT_AHXX_2RMNET          0x0084
 #define CINTERION_PRODUCT_AHXX_AUDIO           0x0085
+#define CINTERION_PRODUCT_CLS8                 0x00b0
 
 /* Olivetti products */
 #define OLIVETTI_VENDOR_ID                     0x0b3c
@@ -1154,6 +1155,14 @@ static const struct usb_device_id option_ids[] = {
          .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) },
        { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG5, 0xff),
          .driver_info = RSVD(0) | RSVD(1) | NCTRL(2) | RSVD(3) },
+       { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1050, 0xff),    /* Telit FN980 (rmnet) */
+         .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
+       { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1051, 0xff),    /* Telit FN980 (MBIM) */
+         .driver_info = NCTRL(0) | RSVD(1) },
+       { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1052, 0xff),    /* Telit FN980 (RNDIS) */
+         .driver_info = NCTRL(2) | RSVD(3) },
+       { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1053, 0xff),    /* Telit FN980 (ECM) */
+         .driver_info = NCTRL(0) | RSVD(1) },
        { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910),
          .driver_info = NCTRL(0) | RSVD(1) | RSVD(3) },
        { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),
@@ -1847,6 +1856,8 @@ static const struct usb_device_id option_ids[] = {
          .driver_info = RSVD(4) },
        { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX_2RMNET, 0xff) },
        { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX_AUDIO, 0xff) },
+       { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_CLS8, 0xff),
+         .driver_info = RSVD(0) | RSVD(4) },
        { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) },
        { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) },
        { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDM) },
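
Editor's note: the new Telit FN980 and Cinterion CLS8 entries use USB_DEVICE_INTERFACE_CLASS() so only the vendor-specific (0xff) interfaces bind to option, and the NCTRL()/RSVD() driver_info flags, which are option.c's own macros, mark which interface numbers carry the modem control channel or must be left alone (typically network interfaces handled by other drivers). A hedged sketch of such an entry; the product ID below is made up:

/* Sketch of an option.c-style match entry, hypothetical PID. */
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1234, 0xff),
  .driver_info = NCTRL(0) | RSVD(1) },	/* if0 = control port, skip if1 */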
index dd0ad67aa71e619907afdab6dc142a4335684e51..ef23acc9b9ce4bc8fe8ccb8197b42f69928a87b3 100644 (file)
@@ -776,7 +776,6 @@ static void ti_close(struct usb_serial_port *port)
        struct ti_port *tport;
        int port_number;
        int status;
-       int do_unlock;
        unsigned long flags;
 
        tdev = usb_get_serial_data(port->serial);
@@ -800,16 +799,13 @@ static void ti_close(struct usb_serial_port *port)
                        "%s - cannot send close port command, %d\n"
                                                        , __func__, status);
 
-       /* if mutex_lock is interrupted, continue anyway */
-       do_unlock = !mutex_lock_interruptible(&tdev->td_open_close_lock);
-       --tport->tp_tdev->td_open_port_count;
-       if (tport->tp_tdev->td_open_port_count <= 0) {
+       mutex_lock(&tdev->td_open_close_lock);
+       --tdev->td_open_port_count;
+       if (tdev->td_open_port_count == 0) {
                /* last port is closed, shut down interrupt urb */
                usb_kill_urb(port->serial->port[0]->interrupt_in_urb);
-               tport->tp_tdev->td_open_port_count = 0;
        }
-       if (do_unlock)
-               mutex_unlock(&tdev->td_open_close_lock);
+       mutex_unlock(&tdev->td_open_close_lock);
 }
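
The ti_close() change replaces mutex_lock_interruptible() with an unconditional mutex_lock(). In a close path there is nothing sensible to do if the lock attempt is interrupted, and the old code then decremented the shared open-port count without holding the lock and skipped the unlock. A small userspace analogue of the corrected pattern, using pthreads and hypothetical names rather than the kernel's mutex API:

    #include <pthread.h>

    struct tdev {
        pthread_mutex_t open_close_lock;
        int open_port_count;
        int interrupt_running;
    };

    /* Teardown always takes the lock: the count and the "last closer shuts
     * down shared resources" decision must be made atomically. */
    static void port_close(struct tdev *tdev)
    {
        pthread_mutex_lock(&tdev->open_close_lock);
        --tdev->open_port_count;
        if (tdev->open_port_count == 0)
            tdev->interrupt_running = 0;  /* stand-in for killing the shared interrupt URB */
        pthread_mutex_unlock(&tdev->open_close_lock);
    }

    int main(void)
    {
        struct tdev t = {
            .open_close_lock = PTHREAD_MUTEX_INITIALIZER,
            .open_port_count = 1,
            .interrupt_running = 1,
        };
        port_close(&t);
        return 0;
    }
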
 
 
index a3179fea38c87b51ca54c85635eff0acb86a8d61..8f066bb55d7d33eb18b8f43ba845c632ed100e2a 100644 (file)
@@ -314,10 +314,7 @@ static void serial_cleanup(struct tty_struct *tty)
        serial = port->serial;
        owner = serial->type->driver.owner;
 
-       mutex_lock(&serial->disc_mutex);
-       if (!serial->disconnected)
-               usb_autopm_put_interface(serial->interface);
-       mutex_unlock(&serial->disc_mutex);
+       usb_autopm_put_interface(serial->interface);
 
        usb_serial_put(serial);
        module_put(owner);
index 79314d8c94a446b9378aa7d5ebcb9a533e31cf1e..ca3bd58f2025a9957742970122cacd997ba67085 100644 (file)
@@ -559,6 +559,10 @@ static int firm_send_command(struct usb_serial_port *port, __u8 command,
 
        command_port = port->serial->port[COMMAND_PORT];
        command_info = usb_get_serial_port_data(command_port);
+
+       if (command_port->bulk_out_size < datasize + 1)
+               return -EIO;
+
        mutex_lock(&command_info->mutex);
        command_info->command_finished = false;
 
@@ -632,6 +636,7 @@ static void firm_setup_port(struct tty_struct *tty)
        struct device *dev = &port->dev;
        struct whiteheat_port_settings port_settings;
        unsigned int cflag = tty->termios.c_cflag;
+       speed_t baud;
 
        port_settings.port = port->port_number + 1;
 
@@ -692,11 +697,13 @@ static void firm_setup_port(struct tty_struct *tty)
        dev_dbg(dev, "%s - XON = %2x, XOFF = %2x\n", __func__, port_settings.xon, port_settings.xoff);
 
        /* get the baud rate wanted */
-       port_settings.baud = tty_get_baud_rate(tty);
-       dev_dbg(dev, "%s - baud rate = %d\n", __func__, port_settings.baud);
+       baud = tty_get_baud_rate(tty);
+       port_settings.baud = cpu_to_le32(baud);
+       dev_dbg(dev, "%s - baud rate = %u\n", __func__, baud);
 
        /* fixme: should set validated settings */
-       tty_encode_baud_rate(tty, port_settings.baud, port_settings.baud);
+       tty_encode_baud_rate(tty, baud, baud);
+
        /* handle any settings that aren't specified in the tty structure */
        port_settings.lloop = 0;
 
index 00398149cd8ddcf0985dd363d24d66415196cdd8..269e727a92f9c5bbe7c2825d988c06f0e640491c 100644 (file)
@@ -87,7 +87,7 @@ struct whiteheat_simple {
 
 struct whiteheat_port_settings {
        __u8    port;           /* port number (1 to N) */
-       __u32   baud;           /* any value 7 - 460800, firmware calculates
+       __le32  baud;           /* any value 7 - 460800, firmware calculates
                                   best fit; arrives little endian */
        __u8    bits;           /* 5, 6, 7, or 8 */
        __u8    stop;           /* 1 or 2, default 1 (2 = 1.5 if bits = 5) */
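
In the whiteheat driver the baud field now carries the device's wire format: the struct field above becomes __le32 and the earlier firm_setup_port() hunk converts the native-endian speed_t exactly once with cpu_to_le32(), keeping the tty layer in host byte order. A standalone sketch of the same boundary conversion using glibc's htole32()/le32toh(); the struct and values are illustrative, not the driver's:

    #include <endian.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Wire-format message: multi-byte fields are fixed little-endian,
     * regardless of host byte order. */
    struct port_settings_wire {
        uint8_t  port;
        uint32_t baud_le;       /* little-endian on the wire */
    } __attribute__((packed));

    int main(void)
    {
        uint32_t baud = 115200;             /* native-endian value from the tty layer */
        struct port_settings_wire msg = {
            .port    = 1,
            .baud_le = htole32(baud),       /* convert exactly once, at the boundary */
        };

        printf("host value %u, wire value 0x%08x\n", baud, (unsigned)msg.baud_le);
        printf("round trip %u\n", le32toh(msg.baud_le));
        return 0;
    }
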
index 6737fab9495949c0187364cfce1e7469687da9fa..54a3c8195c969785a6ab2a359a2787f321cce553 100644 (file)
@@ -68,7 +68,6 @@ static const char* host_info(struct Scsi_Host *host)
 static int slave_alloc (struct scsi_device *sdev)
 {
        struct us_data *us = host_to_us(sdev->host);
-       int maxp;
 
        /*
         * Set the INQUIRY transfer length to 36.  We don't use any of
@@ -77,15 +76,6 @@ static int slave_alloc (struct scsi_device *sdev)
         */
        sdev->inquiry_len = 36;
 
-       /*
-        * USB has unusual scatter-gather requirements: the length of each
-        * scatterlist element except the last must be divisible by the
-        * Bulk maxpacket value.  Fortunately this value is always a
-        * power of 2.  Inform the block layer about this requirement.
-        */
-       maxp = usb_maxpacket(us->pusb_dev, us->recv_bulk_pipe, 0);
-       blk_queue_virt_boundary(sdev->request_queue, maxp - 1);
-
        /*
         * Some host controllers may have alignment requirements.
         * We'll play it safe by requiring 512-byte alignment always.
index bf80d6f81f587f4269b2bf7a96d74099f63e9d15..34538253f12cdf6d4213167543b24ed207267f78 100644 (file)
@@ -789,29 +789,9 @@ static int uas_slave_alloc(struct scsi_device *sdev)
 {
        struct uas_dev_info *devinfo =
                (struct uas_dev_info *)sdev->host->hostdata;
-       int maxp;
 
        sdev->hostdata = devinfo;
 
-       /*
-        * We have two requirements here. We must satisfy the requirements
-        * of the physical HC and the demands of the protocol, as we
-        * definitely want no additional memory allocation in this path
-        * ruling out using bounce buffers.
-        *
-        * For a transmission on USB to continue we must never send
-        * a package that is smaller than maxpacket. Hence the length of each
-         * scatterlist element except the last must be divisible by the
-         * Bulk maxpacket value.
-        * If the HC does not ensure that through SG,
-        * the upper layer must do that. We must assume nothing
-        * about the capabilities off the HC, so we use the most
-        * pessimistic requirement.
-        */
-
-       maxp = usb_maxpacket(devinfo->udev, devinfo->data_in_pipe, 0);
-       blk_queue_virt_boundary(sdev->request_queue, maxp - 1);
-
        /*
         * The protocol has no requirements on alignment in the strict sense.
         * Controllers may or may not have alignment restrictions.
index 96562744101c8122a870e5a24928741ca2c66b0d..5f61d9977a15fe0e926548100517a35827087447 100644 (file)
@@ -4409,18 +4409,20 @@ static int tcpm_fw_get_caps(struct tcpm_port *port,
        /* USB data support is optional */
        ret = fwnode_property_read_string(fwnode, "data-role", &cap_str);
        if (ret == 0) {
-               port->typec_caps.data = typec_find_port_data_role(cap_str);
-               if (port->typec_caps.data < 0)
-                       return -EINVAL;
+               ret = typec_find_port_data_role(cap_str);
+               if (ret < 0)
+                       return ret;
+               port->typec_caps.data = ret;
        }
 
        ret = fwnode_property_read_string(fwnode, "power-role", &cap_str);
        if (ret < 0)
                return ret;
 
-       port->typec_caps.type = typec_find_port_power_role(cap_str);
-       if (port->typec_caps.type < 0)
-               return -EINVAL;
+       ret = typec_find_port_power_role(cap_str);
+       if (ret < 0)
+               return ret;
+       port->typec_caps.type = ret;
        port->port_type = port->typec_caps.type;
 
        if (port->port_type == TYPEC_PORT_SNK)
index 6c103697c5823af4b2ccab553fae8a1a404888ff..d99700cb4dcada5ec6635f29b0a56298a0849b8a 100644 (file)
@@ -75,6 +75,8 @@ static int ucsi_displayport_enter(struct typec_altmode *alt)
 
        if (cur != 0xff) {
                mutex_unlock(&dp->con->lock);
+               if (dp->con->port_altmode[cur] == alt)
+                       return 0;
                return -EBUSY;
        }
 
index 907e20e1a71ee792079dd107f14dd869328bd41e..d772fce5190570da226a6cbb67ce4424765109db 100644 (file)
@@ -195,7 +195,6 @@ struct ucsi_ccg {
 
        /* fw build with vendor information */
        u16 fw_build;
-       bool run_isr; /* flag to call ISR routine during resume */
        struct work_struct pm_work;
 };
 
@@ -224,18 +223,6 @@ static int ccg_read(struct ucsi_ccg *uc, u16 rab, u8 *data, u32 len)
        if (quirks && quirks->max_read_len)
                max_read_len = quirks->max_read_len;
 
-       if (uc->fw_build == CCG_FW_BUILD_NVIDIA &&
-           uc->fw_version <= CCG_OLD_FW_VERSION) {
-               mutex_lock(&uc->lock);
-               /*
-                * Do not schedule pm_work to run ISR in
-                * ucsi_ccg_runtime_resume() after pm_runtime_get_sync()
-                * since we are already in ISR path.
-                */
-               uc->run_isr = false;
-               mutex_unlock(&uc->lock);
-       }
-
        pm_runtime_get_sync(uc->dev);
        while (rem_len > 0) {
                msgs[1].buf = &data[len - rem_len];
@@ -278,18 +265,6 @@ static int ccg_write(struct ucsi_ccg *uc, u16 rab, u8 *data, u32 len)
        msgs[0].len = len + sizeof(rab);
        msgs[0].buf = buf;
 
-       if (uc->fw_build == CCG_FW_BUILD_NVIDIA &&
-           uc->fw_version <= CCG_OLD_FW_VERSION) {
-               mutex_lock(&uc->lock);
-               /*
-                * Do not schedule pm_work to run ISR in
-                * ucsi_ccg_runtime_resume() after pm_runtime_get_sync()
-                * since we are already in ISR path.
-                */
-               uc->run_isr = false;
-               mutex_unlock(&uc->lock);
-       }
-
        pm_runtime_get_sync(uc->dev);
        status = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
        if (status < 0) {
@@ -1130,7 +1105,6 @@ static int ucsi_ccg_probe(struct i2c_client *client,
        uc->ppm.sync = ucsi_ccg_sync;
        uc->dev = dev;
        uc->client = client;
-       uc->run_isr = true;
        mutex_init(&uc->lock);
        INIT_WORK(&uc->work, ccg_update_firmware);
        INIT_WORK(&uc->pm_work, ccg_pm_workaround_work);
@@ -1188,6 +1162,8 @@ static int ucsi_ccg_probe(struct i2c_client *client,
 
        pm_runtime_set_active(uc->dev);
        pm_runtime_enable(uc->dev);
+       pm_runtime_use_autosuspend(uc->dev);
+       pm_runtime_set_autosuspend_delay(uc->dev, 5000);
        pm_runtime_idle(uc->dev);
 
        return 0;
@@ -1229,7 +1205,6 @@ static int ucsi_ccg_runtime_resume(struct device *dev)
 {
        struct i2c_client *client = to_i2c_client(dev);
        struct ucsi_ccg *uc = i2c_get_clientdata(client);
-       bool schedule = true;
 
        /*
         * Firmware version 3.1.10 or earlier, built for NVIDIA has known issue
@@ -1237,17 +1212,8 @@ static int ucsi_ccg_runtime_resume(struct device *dev)
         * Schedule a work to call ISR as a workaround.
         */
        if (uc->fw_build == CCG_FW_BUILD_NVIDIA &&
-           uc->fw_version <= CCG_OLD_FW_VERSION) {
-               mutex_lock(&uc->lock);
-               if (!uc->run_isr) {
-                       uc->run_isr = true;
-                       schedule = false;
-               }
-               mutex_unlock(&uc->lock);
-
-               if (schedule)
-                       schedule_work(&uc->pm_work);
-       }
+           uc->fw_version <= CCG_OLD_FW_VERSION)
+               schedule_work(&uc->pm_work);
 
        return 0;
 }
index c31d17d05810c933f3098187023627d28e2ea61d..2dc58766273a721cbedbd54fd28452f7ba1308b0 100644 (file)
@@ -61,6 +61,7 @@ struct usb_skel {
        spinlock_t              err_lock;               /* lock for errors */
        struct kref             kref;
        struct mutex            io_mutex;               /* synchronize I/O with disconnect */
+       unsigned long           disconnected:1;
        wait_queue_head_t       bulk_in_wait;           /* to wait for an ongoing read */
 };
 #define to_skel_dev(d) container_of(d, struct usb_skel, kref)
@@ -73,6 +74,7 @@ static void skel_delete(struct kref *kref)
        struct usb_skel *dev = to_skel_dev(kref);
 
        usb_free_urb(dev->bulk_in_urb);
+       usb_put_intf(dev->interface);
        usb_put_dev(dev->udev);
        kfree(dev->bulk_in_buffer);
        kfree(dev);
@@ -124,10 +126,7 @@ static int skel_release(struct inode *inode, struct file *file)
                return -ENODEV;
 
        /* allow the device to be autosuspended */
-       mutex_lock(&dev->io_mutex);
-       if (dev->interface)
-               usb_autopm_put_interface(dev->interface);
-       mutex_unlock(&dev->io_mutex);
+       usb_autopm_put_interface(dev->interface);
 
        /* decrement the count on our device */
        kref_put(&dev->kref, skel_delete);
@@ -231,8 +230,7 @@ static ssize_t skel_read(struct file *file, char *buffer, size_t count,
 
        dev = file->private_data;
 
-       /* if we cannot read at all, return EOF */
-       if (!dev->bulk_in_urb || !count)
+       if (!count)
                return 0;
 
        /* no concurrent readers */
@@ -240,7 +238,7 @@ static ssize_t skel_read(struct file *file, char *buffer, size_t count,
        if (rv < 0)
                return rv;
 
-       if (!dev->interface) {          /* disconnect() was called */
+       if (dev->disconnected) {                /* disconnect() was called */
                rv = -ENODEV;
                goto exit;
        }
@@ -422,7 +420,7 @@ static ssize_t skel_write(struct file *file, const char *user_buffer,
 
        /* this lock makes sure we don't submit URBs to gone devices */
        mutex_lock(&dev->io_mutex);
-       if (!dev->interface) {          /* disconnect() was called */
+       if (dev->disconnected) {                /* disconnect() was called */
                mutex_unlock(&dev->io_mutex);
                retval = -ENODEV;
                goto error;
@@ -507,7 +505,7 @@ static int skel_probe(struct usb_interface *interface,
        init_waitqueue_head(&dev->bulk_in_wait);
 
        dev->udev = usb_get_dev(interface_to_usbdev(interface));
-       dev->interface = interface;
+       dev->interface = usb_get_intf(interface);
 
        /* set up the endpoint information */
        /* use only the first bulk-in and bulk-out endpoints */
@@ -573,9 +571,10 @@ static void skel_disconnect(struct usb_interface *interface)
 
        /* prevent more I/O from starting */
        mutex_lock(&dev->io_mutex);
-       dev->interface = NULL;
+       dev->disconnected = 1;
        mutex_unlock(&dev->io_mutex);
 
+       usb_kill_urb(dev->bulk_in_urb);
        usb_kill_anchored_urbs(&dev->submitted);
 
        /* decrement our usage count */
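
The usb-skeleton changes switch from clearing dev->interface on disconnect to a dedicated disconnected bit, and take a reference on the interface with usb_get_intf() so it stays valid (and usb_autopm_put_interface() stays safe) until the last opener is gone. A compact userspace sketch of that lifetime pattern with a hand-rolled refcount; names and types are illustrative only:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdlib.h>

    struct skel_like {
        atomic_int refs;
        bool disconnected;      /* set once at disconnect, never cleared */
        void *interface;        /* stays valid until the last reference drops */
    };

    static void skel_get(struct skel_like *d) { atomic_fetch_add(&d->refs, 1); }

    static void skel_put(struct skel_like *d)
    {
        if (atomic_fetch_sub(&d->refs, 1) == 1) {
            free(d->interface); /* only the final user releases the interface */
            free(d);
        }
    }

    /* I/O paths test the flag instead of racing on a pointer that might
     * have been cleared underneath them. */
    static int skel_read_like(struct skel_like *d)
    {
        return d->disconnected ? -1 /* -ENODEV in the real driver */ : 0;
    }

    int main(void)
    {
        struct skel_like *d = calloc(1, sizeof(*d));
        atomic_init(&d->refs, 1);
        d->interface = malloc(16);

        skel_get(d);                /* an open file takes its own reference */
        (void)skel_read_like(d);    /* still connected, returns 0 */
        d->disconnected = true;     /* disconnect happens */
        skel_put(d);                /* file release */
        skel_put(d);                /* original reference: frees everything */
        return 0;
    }
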
index 585a84d319bdaa080e6f15406502e3b2bb38e998..65850e9c719055218545d07505f9febe75810066 100644 (file)
@@ -1195,12 +1195,12 @@ static int vhci_start(struct usb_hcd *hcd)
        if (id == 0 && usb_hcd_is_primary_hcd(hcd)) {
                err = vhci_init_attr_group();
                if (err) {
-                       pr_err("init attr group\n");
+                       dev_err(hcd_dev(hcd), "init attr group failed, err = %d\n", err);
                        return err;
                }
                err = sysfs_create_group(&hcd_dev(hcd)->kobj, &vhci_attr_group);
                if (err) {
-                       pr_err("create sysfs files\n");
+                       dev_err(hcd_dev(hcd), "create sysfs files failed, err = %d\n", err);
                        vhci_finish_attr_group();
                        return err;
                }
index c3803785f6eff23a74c2225cd696a7a69ea1a757..0ae40a13a9fea89650c4200a359530dd7c954281 100644 (file)
@@ -147,7 +147,10 @@ static int vhci_send_cmd_submit(struct vhci_device *vdev)
                }
 
                kfree(iov);
+               /* This is only for isochronous case */
                kfree(iso_buffer);
+               iso_buffer = NULL;
+
                usbip_dbg_vhci_tx("send txdata\n");
 
                total_size += txsize;
index 96fddc1dafc3c4dffd62bf98df51e48a0598febc..d864277ea16f31627ca776769297788995840400 100644 (file)
@@ -1658,7 +1658,7 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
        struct bus_type *bus = NULL;
        int ret;
        bool resv_msi, msi_remap;
-       phys_addr_t resv_msi_base;
+       phys_addr_t resv_msi_base = 0;
        struct iommu_domain_geometry geo;
        LIST_HEAD(iova_copy);
        LIST_HEAD(group_resv_regions);
index 7804869c6a31398ffbcc9cd1c5786b76f1f54274..056308008288c80dc14efbf6c437cbc6940129dd 100644 (file)
@@ -161,6 +161,7 @@ static int vhost_test_release(struct inode *inode, struct file *f)
 
        vhost_test_stop(n, &private);
        vhost_test_flush(n);
+       vhost_dev_stop(&n->dev);
        vhost_dev_cleanup(&n->dev);
        /* We do an extra flush before freeing memory,
         * since jobs can re-queue themselves. */
@@ -237,6 +238,7 @@ static long vhost_test_reset_owner(struct vhost_test *n)
        }
        vhost_test_stop(n, &priv);
        vhost_test_flush(n);
+       vhost_dev_stop(&n->dev);
        vhost_dev_reset_owner(&n->dev, umem);
 done:
        mutex_unlock(&n->dev.mutex);
index 08ad0d1f04766fe10b8a5829628983ec09f0b561..a0a2d74967ef57eb72f73a3bb5367177421ed2d6 100644 (file)
@@ -852,6 +852,12 @@ static inline int xfer_kern(void *src, void *dst, size_t len)
        return 0;
 }
 
+static inline int kern_xfer(void *dst, void *src, size_t len)
+{
+       memcpy(dst, src, len);
+       return 0;
+}
+
 /**
  * vringh_init_kern - initialize a vringh for a kernelspace vring.
  * @vrh: the vringh to initialize.
@@ -958,7 +964,7 @@ EXPORT_SYMBOL(vringh_iov_pull_kern);
 ssize_t vringh_iov_push_kern(struct vringh_kiov *wiov,
                             const void *src, size_t len)
 {
-       return vringh_iov_xfer(wiov, (void *)src, len, xfer_kern);
+       return vringh_iov_xfer(wiov, (void *)src, len, kern_xfer);
 }
 EXPORT_SYMBOL(vringh_iov_push_kern);
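
The vringh fix is purely about callback argument order: xfer_kern() follows the (src, dst) convention of the pull path, so reusing it for vringh_iov_push_kern() copied data in the wrong direction, and the new kern_xfer() takes (dst, src). A tiny standalone illustration (hypothetical names) of how passing the wrong-convention callback reverses a copy:

    #include <assert.h>
    #include <string.h>

    typedef int (*xfer_fn)(void *a, void *b, size_t len);

    /* pull convention: (src, dst) */
    static int xfer_src_dst(void *src, void *dst, size_t len) { memcpy(dst, src, len); return 0; }
    /* push convention: (dst, src) */
    static int xfer_dst_src(void *dst, void *src, size_t len) { memcpy(dst, src, len); return 0; }

    /* The generic helper always calls fn(ring_buf, user_buf, len); whether that
     * reads or writes the ring depends entirely on which callback is passed in. */
    static int iov_xfer(void *ring_buf, void *user_buf, size_t len, xfer_fn fn)
    {
        return fn(ring_buf, user_buf, len);
    }

    int main(void)
    {
        char ring[8] = "old", src[8] = "new", out[8] = {0};

        /* push: the ring must receive src, so the (dst, src) callback is required */
        iov_xfer(ring, src, sizeof(src), xfer_dst_src);
        assert(strcmp(ring, "new") == 0);

        /* pull: the caller buffer receives the ring contents */
        iov_xfer(ring, out, sizeof(out), xfer_src_dst);
        assert(strcmp(out, "new") == 0);
        return 0;
    }
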
 
index 228a89b9bdd135f9e029d5b132909d92dda858c6..16f60c1e1766a55061528889caefc097e42753e1 100644 (file)
@@ -18,23 +18,6 @@ obj-$(CONFIG_SPU_BASE)                       += logo_spe_clut224.o
 
 # How to generate logo's
 
-# Use logo-cfiles to retrieve list of .c files to be built
-logo-cfiles = $(notdir $(patsubst %.$(2), %.c, \
-              $(wildcard $(srctree)/$(src)/*$(1).$(2))))
-
-
-# Mono logos
-extra-y += $(call logo-cfiles,_mono,pbm)
-
-# VGA16 logos
-extra-y += $(call logo-cfiles,_vga16,ppm)
-
-# 224 Logos
-extra-y += $(call logo-cfiles,_clut224,ppm)
-
-# Gray 256
-extra-y += $(call logo-cfiles,_gray256,pgm)
-
 pnmtologo := scripts/pnmtologo
 
 # Create commands like "pnmtologo -t mono -n logo_mac_mono -o ..."
@@ -55,5 +38,5 @@ $(obj)/%_clut224.c: $(src)/%_clut224.ppm $(pnmtologo) FORCE
 $(obj)/%_gray256.c: $(src)/%_gray256.pgm $(pnmtologo) FORCE
        $(call if_changed,logo)
 
-# Files generated that shall be removed upon make clean
-clean-files := *.o *_mono.c *_vga16.c *_clut224.c *_gray256.c
+# generated C files
+targets += *_mono.c *_vga16.c *_clut224.c *_gray256.c
index 75fd140b02ff8aa41816a1f0284a4926c214df7e..43c391626a000d98bb6461d274cd2317d98e2913 100644 (file)
@@ -220,6 +220,8 @@ static int hgcm_call_preprocess_linaddr(
        if (!bounce_buf)
                return -ENOMEM;
 
+       *bounce_buf_ret = bounce_buf;
+
        if (copy_in) {
                ret = copy_from_user(bounce_buf, (void __user *)buf, len);
                if (ret)
@@ -228,7 +230,6 @@ static int hgcm_call_preprocess_linaddr(
                memset(bounce_buf, 0, len);
        }
 
-       *bounce_buf_ret = bounce_buf;
        hgcm_call_add_pagelist_size(bounce_buf, len, extra);
        return 0;
 }
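
The vboxguest hunk publishes the bounce buffer through *bounce_buf_ret before the copy_from_user() that may fail, so the caller's common error path can free it; previously a failed copy leaked the allocation because the out-parameter was only set on success. A minimal sketch of this "hand ownership to the caller before fallible work" pattern, with hypothetical names:

    #include <stdlib.h>
    #include <string.h>

    /* Returns 0 on success, -1 on failure. On any return, whatever was
     * allocated is reachable through *out, so one cleanup path in the
     * caller (free(*out)) covers both outcomes. */
    static int prepare_buffer(void **out, const void *src, size_t len, int simulate_fail)
    {
        void *buf = malloc(len);
        if (!buf)
            return -1;

        *out = buf;                /* publish before anything that can fail */

        if (simulate_fail)         /* stands in for a failing copy_from_user() */
            return -1;

        memcpy(buf, src, len);
        return 0;
    }

    int main(void)
    {
        void *buf = NULL;
        char data[16] = "payload";

        if (prepare_buffer(&buf, data, sizeof(data), 1) < 0) {
            free(buf);             /* no leak even though the call failed */
            buf = NULL;
        }
        return 0;
    }
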
index bdc08244a648dbf0ee58fe8b9dc4e5e36c15dcb8..a8041e451e9ec6ba9982069477520d5df00386c2 100644 (file)
@@ -1499,9 +1499,6 @@ static bool virtqueue_enable_cb_delayed_packed(struct virtqueue *_vq)
                 * counter first before updating event flags.
                 */
                virtio_wmb(vq->weak_barriers);
-       } else {
-               used_idx = vq->last_used_idx;
-               wrap_counter = vq->packed.used_wrap_counter;
        }
 
        if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DISABLE) {
@@ -1518,7 +1515,9 @@ static bool virtqueue_enable_cb_delayed_packed(struct virtqueue *_vq)
         */
        virtio_mb(vq->weak_barriers);
 
-       if (is_used_desc_packed(vq, used_idx, wrap_counter)) {
+       if (is_used_desc_packed(vq,
+                               vq->last_used_idx,
+                               vq->packed.used_wrap_counter)) {
                END_USE(vq);
                return false;
        }
index ebed495b9e692cfbce260201397a8546c3d0f553..b7847636501dff78838089826b37ce9db4ffc469 100644 (file)
@@ -103,6 +103,7 @@ config W1_SLAVE_DS2438
 
 config W1_SLAVE_DS250X
        tristate "512b/1kb/16kb EPROM family support"
+       select CRC16
        help
          Say Y here if you want to use a 1-wire
          512b/1kb/16kb EPROM family device (DS250x).
index 4e11de6cde81f398ead5005c3ca003d2e1fbcfc6..5bae515c8e25c0a4f1c90b42e24b271a5777ef84 100644 (file)
@@ -156,8 +156,10 @@ static DECLARE_DELAYED_WORK(balloon_worker, balloon_process);
        (GFP_HIGHUSER | __GFP_NOWARN | __GFP_NORETRY | __GFP_NOMEMALLOC)
 
 /* balloon_append: add the given page to the balloon. */
-static void __balloon_append(struct page *page)
+static void balloon_append(struct page *page)
 {
+       __SetPageOffline(page);
+
        /* Lowmem is re-populated first, so highmem pages go at list tail. */
        if (PageHighMem(page)) {
                list_add_tail(&page->lru, &ballooned_pages);
@@ -169,11 +171,6 @@ static void __balloon_append(struct page *page)
        wake_up(&balloon_wq);
 }
 
-static void balloon_append(struct page *page)
-{
-       __balloon_append(page);
-}
-
 /* balloon_retrieve: rescue a page from the balloon, if it is not empty. */
 static struct page *balloon_retrieve(bool require_lowmem)
 {
@@ -192,6 +189,7 @@ static struct page *balloon_retrieve(bool require_lowmem)
        else
                balloon_stats.balloon_low--;
 
+       __ClearPageOffline(page);
        return page;
 }
 
@@ -377,8 +375,7 @@ static void xen_online_page(struct page *page, unsigned int order)
        for (i = 0; i < size; i++) {
                p = pfn_to_page(start_pfn + i);
                __online_page_set_limits(p);
-               __SetPageOffline(p);
-               __balloon_append(p);
+               balloon_append(p);
        }
        mutex_unlock(&balloon_mutex);
 }
@@ -444,7 +441,6 @@ static enum bp_state increase_reservation(unsigned long nr_pages)
                xenmem_reservation_va_mapping_update(1, &page, &frame_list[i]);
 
                /* Relinquish the page back to the allocator. */
-               __ClearPageOffline(page);
                free_reserved_page(page);
        }
 
@@ -471,7 +467,6 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
                        state = BP_EAGAIN;
                        break;
                }
-               __SetPageOffline(page);
                adjust_managed_page_count(page, -1);
                xenmem_reservation_scrub_page(page);
                list_add(&page->lru, &pages);
@@ -611,7 +606,6 @@ int alloc_xenballooned_pages(int nr_pages, struct page **pages)
        while (pgno < nr_pages) {
                page = balloon_retrieve(true);
                if (page) {
-                       __ClearPageOffline(page);
                        pages[pgno++] = page;
 #ifdef CONFIG_XEN_HAVE_PVMMU
                        /*
@@ -653,10 +647,8 @@ void free_xenballooned_pages(int nr_pages, struct page **pages)
        mutex_lock(&balloon_mutex);
 
        for (i = 0; i < nr_pages; i++) {
-               if (pages[i]) {
-                       __SetPageOffline(pages[i]);
+               if (pages[i])
                        balloon_append(pages[i]);
-               }
        }
 
        balloon_stats.target_unpopulated -= nr_pages;
@@ -674,7 +666,6 @@ static void __init balloon_add_region(unsigned long start_pfn,
                                      unsigned long pages)
 {
        unsigned long pfn, extra_pfn_end;
-       struct page *page;
 
        /*
         * If the amount of usable memory has been limited (e.g., with
@@ -684,11 +675,10 @@ static void __init balloon_add_region(unsigned long start_pfn,
        extra_pfn_end = min(max_pfn, start_pfn + pages);
 
        for (pfn = start_pfn; pfn < extra_pfn_end; pfn++) {
-               page = pfn_to_page(pfn);
                /* totalram_pages and totalhigh_pages do not
                   include the boot-time balloon extension, so
                   don't subtract from it. */
-               __balloon_append(page);
+               balloon_append(pfn_to_page(pfn));
        }
 
        balloon_stats.total_pages += extra_pfn_end - start_pfn;
index 89d60f8e3c180835fb7e7e9091210bfbf7db7b34..d1ff2186ebb48a7c0981ecb6d4afcbbb25ffcea0 100644 (file)
@@ -40,7 +40,7 @@
 
 #define efi_data(op)   (op.u.efi_runtime_call)
 
-efi_status_t xen_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc)
+static efi_status_t xen_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc)
 {
        struct xen_platform_op op = INIT_EFI_OP(get_time);
 
@@ -61,9 +61,8 @@ efi_status_t xen_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc)
 
        return efi_data(op).status;
 }
-EXPORT_SYMBOL_GPL(xen_efi_get_time);
 
-efi_status_t xen_efi_set_time(efi_time_t *tm)
+static efi_status_t xen_efi_set_time(efi_time_t *tm)
 {
        struct xen_platform_op op = INIT_EFI_OP(set_time);
 
@@ -75,10 +74,10 @@ efi_status_t xen_efi_set_time(efi_time_t *tm)
 
        return efi_data(op).status;
 }
-EXPORT_SYMBOL_GPL(xen_efi_set_time);
 
-efi_status_t xen_efi_get_wakeup_time(efi_bool_t *enabled, efi_bool_t *pending,
-                                    efi_time_t *tm)
+static efi_status_t xen_efi_get_wakeup_time(efi_bool_t *enabled,
+                                           efi_bool_t *pending,
+                                           efi_time_t *tm)
 {
        struct xen_platform_op op = INIT_EFI_OP(get_wakeup_time);
 
@@ -98,9 +97,8 @@ efi_status_t xen_efi_get_wakeup_time(efi_bool_t *enabled, efi_bool_t *pending,
 
        return efi_data(op).status;
 }
-EXPORT_SYMBOL_GPL(xen_efi_get_wakeup_time);
 
-efi_status_t xen_efi_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm)
+static efi_status_t xen_efi_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm)
 {
        struct xen_platform_op op = INIT_EFI_OP(set_wakeup_time);
 
@@ -117,11 +115,10 @@ efi_status_t xen_efi_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm)
 
        return efi_data(op).status;
 }
-EXPORT_SYMBOL_GPL(xen_efi_set_wakeup_time);
 
-efi_status_t xen_efi_get_variable(efi_char16_t *name, efi_guid_t *vendor,
-                                 u32 *attr, unsigned long *data_size,
-                                 void *data)
+static efi_status_t xen_efi_get_variable(efi_char16_t *name, efi_guid_t *vendor,
+                                        u32 *attr, unsigned long *data_size,
+                                        void *data)
 {
        struct xen_platform_op op = INIT_EFI_OP(get_variable);
 
@@ -141,11 +138,10 @@ efi_status_t xen_efi_get_variable(efi_char16_t *name, efi_guid_t *vendor,
 
        return efi_data(op).status;
 }
-EXPORT_SYMBOL_GPL(xen_efi_get_variable);
 
-efi_status_t xen_efi_get_next_variable(unsigned long *name_size,
-                                      efi_char16_t *name,
-                                      efi_guid_t *vendor)
+static efi_status_t xen_efi_get_next_variable(unsigned long *name_size,
+                                             efi_char16_t *name,
+                                             efi_guid_t *vendor)
 {
        struct xen_platform_op op = INIT_EFI_OP(get_next_variable_name);
 
@@ -165,11 +161,10 @@ efi_status_t xen_efi_get_next_variable(unsigned long *name_size,
 
        return efi_data(op).status;
 }
-EXPORT_SYMBOL_GPL(xen_efi_get_next_variable);
 
-efi_status_t xen_efi_set_variable(efi_char16_t *name, efi_guid_t *vendor,
-                                u32 attr, unsigned long data_size,
-                                void *data)
+static efi_status_t xen_efi_set_variable(efi_char16_t *name, efi_guid_t *vendor,
+                                        u32 attr, unsigned long data_size,
+                                        void *data)
 {
        struct xen_platform_op op = INIT_EFI_OP(set_variable);
 
@@ -186,11 +181,10 @@ efi_status_t xen_efi_set_variable(efi_char16_t *name, efi_guid_t *vendor,
 
        return efi_data(op).status;
 }
-EXPORT_SYMBOL_GPL(xen_efi_set_variable);
 
-efi_status_t xen_efi_query_variable_info(u32 attr, u64 *storage_space,
-                                        u64 *remaining_space,
-                                        u64 *max_variable_size)
+static efi_status_t xen_efi_query_variable_info(u32 attr, u64 *storage_space,
+                                               u64 *remaining_space,
+                                               u64 *max_variable_size)
 {
        struct xen_platform_op op = INIT_EFI_OP(query_variable_info);
 
@@ -208,9 +202,8 @@ efi_status_t xen_efi_query_variable_info(u32 attr, u64 *storage_space,
 
        return efi_data(op).status;
 }
-EXPORT_SYMBOL_GPL(xen_efi_query_variable_info);
 
-efi_status_t xen_efi_get_next_high_mono_count(u32 *count)
+static efi_status_t xen_efi_get_next_high_mono_count(u32 *count)
 {
        struct xen_platform_op op = INIT_EFI_OP(get_next_high_monotonic_count);
 
@@ -221,10 +214,9 @@ efi_status_t xen_efi_get_next_high_mono_count(u32 *count)
 
        return efi_data(op).status;
 }
-EXPORT_SYMBOL_GPL(xen_efi_get_next_high_mono_count);
 
-efi_status_t xen_efi_update_capsule(efi_capsule_header_t **capsules,
-                                   unsigned long count, unsigned long sg_list)
+static efi_status_t xen_efi_update_capsule(efi_capsule_header_t **capsules,
+                               unsigned long count, unsigned long sg_list)
 {
        struct xen_platform_op op = INIT_EFI_OP(update_capsule);
 
@@ -241,11 +233,9 @@ efi_status_t xen_efi_update_capsule(efi_capsule_header_t **capsules,
 
        return efi_data(op).status;
 }
-EXPORT_SYMBOL_GPL(xen_efi_update_capsule);
 
-efi_status_t xen_efi_query_capsule_caps(efi_capsule_header_t **capsules,
-                                       unsigned long count, u64 *max_size,
-                                       int *reset_type)
+static efi_status_t xen_efi_query_capsule_caps(efi_capsule_header_t **capsules,
+                       unsigned long count, u64 *max_size, int *reset_type)
 {
        struct xen_platform_op op = INIT_EFI_OP(query_capsule_capabilities);
 
@@ -264,10 +254,9 @@ efi_status_t xen_efi_query_capsule_caps(efi_capsule_header_t **capsules,
 
        return efi_data(op).status;
 }
-EXPORT_SYMBOL_GPL(xen_efi_query_capsule_caps);
 
-void xen_efi_reset_system(int reset_type, efi_status_t status,
-                         unsigned long data_size, efi_char16_t *data)
+static void xen_efi_reset_system(int reset_type, efi_status_t status,
+                                unsigned long data_size, efi_char16_t *data)
 {
        switch (reset_type) {
        case EFI_RESET_COLD:
@@ -281,4 +270,25 @@ void xen_efi_reset_system(int reset_type, efi_status_t status,
                BUG();
        }
 }
-EXPORT_SYMBOL_GPL(xen_efi_reset_system);
+
+/*
+ * Set XEN EFI runtime services function pointers. Other fields of struct efi,
+ * e.g. efi.systab, will be set like normal EFI.
+ */
+void __init xen_efi_runtime_setup(void)
+{
+       efi.get_time                    = xen_efi_get_time;
+       efi.set_time                    = xen_efi_set_time;
+       efi.get_wakeup_time             = xen_efi_get_wakeup_time;
+       efi.set_wakeup_time             = xen_efi_set_wakeup_time;
+       efi.get_variable                = xen_efi_get_variable;
+       efi.get_next_variable           = xen_efi_get_next_variable;
+       efi.set_variable                = xen_efi_set_variable;
+       efi.set_variable_nonblocking    = xen_efi_set_variable;
+       efi.query_variable_info         = xen_efi_query_variable_info;
+       efi.query_variable_info_nonblocking = xen_efi_query_variable_info;
+       efi.update_capsule              = xen_efi_update_capsule;
+       efi.query_capsule_caps          = xen_efi_query_capsule_caps;
+       efi.get_next_high_mono_count    = xen_efi_get_next_high_mono_count;
+       efi.reset_system                = xen_efi_reset_system;
+}
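
The xen/efi.c rework makes each runtime-service wrapper static and adds a single xen_efi_runtime_setup() that installs them as function pointers in the global efi structure, instead of exporting every symbol individually. A small standalone sketch of that shape, private implementations plus one setup hook; the ops structure and names are invented for illustration:

    #include <stdio.h>

    struct runtime_ops {
        int (*get_time)(long *t);
        int (*set_time)(long t);
    };

    static struct runtime_ops ops;      /* stand-in for the global 'efi' struct */

    /* Implementations stay private to this file... */
    static int backend_get_time(long *t) { *t = 42; return 0; }
    static int backend_set_time(long t)  { (void)t; return 0; }

    /* ...and one setup function is the only externally visible entry point. */
    void backend_runtime_setup(void)
    {
        ops.get_time = backend_get_time;
        ops.set_time = backend_set_time;
    }

    int main(void)
    {
        long t;
        backend_runtime_setup();
        ops.get_time(&t);
        printf("t=%ld\n", t);
        return 0;
    }
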
index a446a7221e13e9ac564ddf446e7fce84f8587420..81401f386c9ce06d64a8fdab2c54f26f075ac8a6 100644 (file)
@@ -22,6 +22,7 @@
 
 #define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt
 
+#include <linux/dma-mapping.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
@@ -34,9 +35,6 @@
 #include <linux/slab.h>
 #include <linux/highmem.h>
 #include <linux/refcount.h>
-#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
-#include <linux/of_device.h>
-#endif
 
 #include <xen/xen.h>
 #include <xen/grant_table.h>
@@ -625,14 +623,7 @@ static int gntdev_open(struct inode *inode, struct file *flip)
        flip->private_data = priv;
 #ifdef CONFIG_XEN_GRANT_DMA_ALLOC
        priv->dma_dev = gntdev_miscdev.this_device;
-
-       /*
-        * The device is not spawn from a device tree, so arch_setup_dma_ops
-        * is not called, thus leaving the device with dummy DMA ops.
-        * Fix this by calling of_dma_configure() with a NULL node to set
-        * default DMA ops.
-        */
-       of_dma_configure(priv->dma_dev, NULL, true);
+       dma_coerce_mask_and_coherent(priv->dma_dev, DMA_BIT_MASK(64));
 #endif
        pr_debug("priv %p\n", priv);
 
index 7ea6fb6a2e5dd78c53a79bfea140e2e297858f7d..49b381e104efaf64469c75e35668e10efac4ba4d 100644 (file)
@@ -1363,8 +1363,7 @@ static int gnttab_setup(void)
        if (xen_feature(XENFEAT_auto_translated_physmap) && gnttab_shared.addr == NULL) {
                gnttab_shared.addr = xen_auto_xlat_grant_frames.vaddr;
                if (gnttab_shared.addr == NULL) {
-                       pr_warn("gnttab share frames (addr=0x%08lx) is not mapped!\n",
-                               (unsigned long)xen_auto_xlat_grant_frames.vaddr);
+                       pr_warn("gnttab share frames is not mapped!\n");
                        return -ENOMEM;
                }
        }
index 69a626b0e59420443b3972426c2d8be19714589c..c57c71b7d53dbab128c27bfdaadc4ac218469eb2 100644 (file)
@@ -775,7 +775,7 @@ static int pvcalls_back_poll(struct xenbus_device *dev,
        mappass->reqcopy = *req;
        icsk = inet_csk(mappass->sock->sk);
        queue = &icsk->icsk_accept_queue;
-       data = queue->rskq_accept_head != NULL;
+       data = READ_ONCE(queue->rskq_accept_head) != NULL;
        if (data) {
                mappass->reqcopy.cmd = 0;
                ret = 0;
index 08adc590f6314b2b8c72b24e3a5fdd161b3d0229..597af455a522b1c01b6f5c65e7417fc8da553a1c 100644 (file)
@@ -55,6 +55,7 @@
 #include <linux/string.h>
 #include <linux/slab.h>
 #include <linux/miscdevice.h>
+#include <linux/workqueue.h>
 
 #include <xen/xenbus.h>
 #include <xen/xen.h>
@@ -116,6 +117,8 @@ struct xenbus_file_priv {
        wait_queue_head_t read_waitq;
 
        struct kref kref;
+
+       struct work_struct wq;
 };
 
 /* Read out any raw xenbus messages queued up. */
@@ -300,14 +303,14 @@ static void watch_fired(struct xenbus_watch *watch,
        mutex_unlock(&adap->dev_data->reply_mutex);
 }
 
-static void xenbus_file_free(struct kref *kref)
+static void xenbus_worker(struct work_struct *wq)
 {
        struct xenbus_file_priv *u;
        struct xenbus_transaction_holder *trans, *tmp;
        struct watch_adapter *watch, *tmp_watch;
        struct read_buffer *rb, *tmp_rb;
 
-       u = container_of(kref, struct xenbus_file_priv, kref);
+       u = container_of(wq, struct xenbus_file_priv, wq);
 
        /*
         * No need for locking here because there are no other users,
@@ -333,6 +336,18 @@ static void xenbus_file_free(struct kref *kref)
        kfree(u);
 }
 
+static void xenbus_file_free(struct kref *kref)
+{
+       struct xenbus_file_priv *u;
+
+       /*
+        * We might be called in xenbus_thread().
+        * Use workqueue to avoid deadlock.
+        */
+       u = container_of(kref, struct xenbus_file_priv, kref);
+       schedule_work(&u->wq);
+}
+
 static struct xenbus_transaction_holder *xenbus_get_transaction(
        struct xenbus_file_priv *u, uint32_t tx_id)
 {
@@ -650,6 +665,7 @@ static int xenbus_file_open(struct inode *inode, struct file *filp)
        INIT_LIST_HEAD(&u->watches);
        INIT_LIST_HEAD(&u->read_buffers);
        init_waitqueue_head(&u->read_waitq);
+       INIT_WORK(&u->wq, xenbus_worker);
 
        mutex_init(&u->reply_mutex);
        mutex_init(&u->msgbuffer_mutex);
index ad4c6b1d5074aa9cab8de782d000433f3410a778..c5642bcb6b468f9ed4e4fe2ff7012eb4fe024f9f 100644 (file)
@@ -879,7 +879,7 @@ out_free_interp:
           the correct location in memory. */
        for(i = 0, elf_ppnt = elf_phdata;
            i < loc->elf_ex.e_phnum; i++, elf_ppnt++) {
-               int elf_prot, elf_flags, elf_fixed = MAP_FIXED_NOREPLACE;
+               int elf_prot, elf_flags;
                unsigned long k, vaddr;
                unsigned long total_size = 0;
 
@@ -911,13 +911,6 @@ out_free_interp:
                                         */
                                }
                        }
-
-                       /*
-                        * Some binaries have overlapping elf segments and then
-                        * we have to forcefully map over an existing mapping
-                        * e.g. over this newly established brk mapping.
-                        */
-                       elf_fixed = MAP_FIXED;
                }
 
                elf_prot = make_prot(elf_ppnt->p_flags);
@@ -930,7 +923,7 @@ out_free_interp:
                 * the ET_DYN load_addr calculations, proceed normally.
                 */
                if (loc->elf_ex.e_type == ET_EXEC || load_addr_set) {
-                       elf_flags |= elf_fixed;
+                       elf_flags |= MAP_FIXED;
                } else if (loc->elf_ex.e_type == ET_DYN) {
                        /*
                         * This logic is run once for the first LOAD Program
@@ -966,7 +959,7 @@ out_free_interp:
                                load_bias = ELF_ET_DYN_BASE;
                                if (current->flags & PF_RANDOMIZE)
                                        load_bias += arch_mmap_rnd();
-                               elf_flags |= elf_fixed;
+                               elf_flags |= MAP_FIXED;
                        } else
                                load_bias = 0;
 
index bf7e3f23bba745528f863066c4d0be5224d8d2e1..670700cb1110692c2fc9e763f6ae028654a3d35a 100644 (file)
@@ -1761,6 +1761,7 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info)
                        btrfs_err(info,
 "bg %llu is a mixed block group but filesystem hasn't enabled mixed block groups",
                                  cache->key.objectid);
+                       btrfs_put_block_group(cache);
                        ret = -EINVAL;
                        goto error;
                }
index 19d669d12ca15226237e698630d7514b65004a88..fe2b8765d9e6b4fc6e991028895580a7c5211426 100644 (file)
@@ -734,8 +734,6 @@ struct btrfs_fs_info {
        struct btrfs_workqueue *fixup_workers;
        struct btrfs_workqueue *delayed_workers;
 
-       /* the extent workers do delayed refs on the extent allocation tree */
-       struct btrfs_workqueue *extent_workers;
        struct task_struct *transaction_kthread;
        struct task_struct *cleaner_kthread;
        u32 thread_pool_size;
@@ -2489,8 +2487,7 @@ int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
                                     int nitems, bool use_global_rsv);
 void btrfs_subvolume_release_metadata(struct btrfs_fs_info *fs_info,
                                      struct btrfs_block_rsv *rsv);
-void btrfs_delalloc_release_extents(struct btrfs_inode *inode, u64 num_bytes,
-                                   bool qgroup_free);
+void btrfs_delalloc_release_extents(struct btrfs_inode *inode, u64 num_bytes);
 
 int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes);
 u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo);
index d949d7d2abed8489baa5318f5e56a0d1462fb5bd..db9f2c58eb4af481158bb8a635a5f6a7872d4877 100644 (file)
@@ -381,7 +381,6 @@ int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes)
 out_qgroup:
        btrfs_qgroup_free_meta_prealloc(root, qgroup_reserve);
 out_fail:
-       btrfs_inode_rsv_release(inode, true);
        if (delalloc_lock)
                mutex_unlock(&inode->delalloc_mutex);
        return ret;
@@ -418,7 +417,6 @@ void btrfs_delalloc_release_metadata(struct btrfs_inode *inode, u64 num_bytes,
  * btrfs_delalloc_release_extents - release our outstanding_extents
  * @inode: the inode to balance the reservation for.
  * @num_bytes: the number of bytes we originally reserved with
- * @qgroup_free: do we need to free qgroup meta reservation or convert them.
  *
  * When we reserve space we increase outstanding_extents for the extents we may
  * add.  Once we've set the range as delalloc or created our ordered extents we
@@ -426,8 +424,7 @@ void btrfs_delalloc_release_metadata(struct btrfs_inode *inode, u64 num_bytes,
  * temporarily tracked outstanding_extents.  This _must_ be used in conjunction
  * with btrfs_delalloc_reserve_metadata.
  */
-void btrfs_delalloc_release_extents(struct btrfs_inode *inode, u64 num_bytes,
-                                   bool qgroup_free)
+void btrfs_delalloc_release_extents(struct btrfs_inode *inode, u64 num_bytes)
 {
        struct btrfs_fs_info *fs_info = inode->root->fs_info;
        unsigned num_extents;
@@ -441,7 +438,7 @@ void btrfs_delalloc_release_extents(struct btrfs_inode *inode, u64 num_bytes,
        if (btrfs_is_testing(fs_info))
                return;
 
-       btrfs_inode_rsv_release(inode, qgroup_free);
+       btrfs_inode_rsv_release(inode, true);
 }
 
 /**
index 044981cf6df9149cd28d0f3c89cbe05b3be39e12..402b61bf345cd55dddd2efed7cf27576ef262a14 100644 (file)
@@ -2008,7 +2008,6 @@ static void btrfs_stop_all_workers(struct btrfs_fs_info *fs_info)
        btrfs_destroy_workqueue(fs_info->readahead_workers);
        btrfs_destroy_workqueue(fs_info->flush_workers);
        btrfs_destroy_workqueue(fs_info->qgroup_rescan_workers);
-       btrfs_destroy_workqueue(fs_info->extent_workers);
        /*
         * Now that all other work queues are destroyed, we can safely destroy
         * the queues used for metadata I/O, since tasks from those other work
@@ -2214,10 +2213,6 @@ static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info,
                                      max_active, 2);
        fs_info->qgroup_rescan_workers =
                btrfs_alloc_workqueue(fs_info, "qgroup-rescan", flags, 1, 0);
-       fs_info->extent_workers =
-               btrfs_alloc_workqueue(fs_info, "extent-refs", flags,
-                                     min_t(u64, fs_devices->num_devices,
-                                           max_active), 8);
 
        if (!(fs_info->workers && fs_info->delalloc_workers &&
              fs_info->submit_workers && fs_info->flush_workers &&
@@ -2228,7 +2223,6 @@ static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info,
              fs_info->endio_freespace_worker && fs_info->rmw_workers &&
              fs_info->caching_workers && fs_info->readahead_workers &&
              fs_info->fixup_workers && fs_info->delayed_workers &&
-             fs_info->extent_workers &&
              fs_info->qgroup_rescan_workers)) {
                return -ENOMEM;
        }
index 8fe4eb7e504527914965012b048c0b27a8736b03..435a502a32261071d3bd4ac4e24d75367a349bbb 100644 (file)
@@ -1591,7 +1591,6 @@ static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb,
        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct page **pages = NULL;
-       struct extent_state *cached_state = NULL;
        struct extent_changeset *data_reserved = NULL;
        u64 release_bytes = 0;
        u64 lockstart;
@@ -1611,6 +1610,7 @@ static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb,
                return -ENOMEM;
 
        while (iov_iter_count(i) > 0) {
+               struct extent_state *cached_state = NULL;
                size_t offset = offset_in_page(pos);
                size_t sector_offset;
                size_t write_bytes = min(iov_iter_count(i),
@@ -1692,7 +1692,7 @@ again:
                                    force_page_uptodate);
                if (ret) {
                        btrfs_delalloc_release_extents(BTRFS_I(inode),
-                                                      reserve_bytes, true);
+                                                      reserve_bytes);
                        break;
                }
 
@@ -1704,7 +1704,7 @@ again:
                        if (extents_locked == -EAGAIN)
                                goto again;
                        btrfs_delalloc_release_extents(BTRFS_I(inode),
-                                                      reserve_bytes, true);
+                                                      reserve_bytes);
                        ret = extents_locked;
                        break;
                }
@@ -1758,11 +1758,21 @@ again:
                if (copied > 0)
                        ret = btrfs_dirty_pages(inode, pages, dirty_pages,
                                                pos, copied, &cached_state);
+
+               /*
+                * If we have not locked the extent range, because the range's
+                * start offset is >= i_size, we might still have a non-NULL
+                * cached extent state, acquired while marking the extent range
+                * as delalloc through btrfs_dirty_pages(). Therefore free any
+                * possible cached extent state to avoid a memory leak.
+                */
                if (extents_locked)
                        unlock_extent_cached(&BTRFS_I(inode)->io_tree,
                                             lockstart, lockend, &cached_state);
-               btrfs_delalloc_release_extents(BTRFS_I(inode), reserve_bytes,
-                                              true);
+               else
+                       free_extent_state(cached_state);
+
+               btrfs_delalloc_release_extents(BTRFS_I(inode), reserve_bytes);
                if (ret) {
                        btrfs_drop_pages(pages, num_pages);
                        break;
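
The buffered-write hunk plugs a leak of the cached extent state: btrfs_dirty_pages() can populate cached_state even when the extent range was never locked (a write starting at or past i_size), and only the unlock path used to free it. The fix frees it explicitly on the not-locked branch and scopes the variable to each loop iteration. A generic sketch of this "conditionally consumed out-parameter" pattern, with hypothetical names:

    #include <stdlib.h>

    struct state { int dummy; };

    /* May hand back a cached state object that the caller then owns. */
    static int mark_range(struct state **cached)
    {
        *cached = malloc(sizeof(**cached));
        return *cached ? 0 : -1;
    }

    /* Consumes (and frees) the cached state. */
    static void unlock_range(struct state **cached)
    {
        free(*cached);
        *cached = NULL;
    }

    int main(void)
    {
        struct state *cached = NULL;
        int range_locked = 0;          /* e.g. the write started beyond i_size */

        mark_range(&cached);           /* may populate 'cached' even without a lock */

        if (range_locked)
            unlock_range(&cached);     /* normal path: the unlock consumes it */
        else
            free(cached);              /* otherwise we must drop it ourselves */
        return 0;
    }
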
@@ -2057,25 +2067,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
        struct btrfs_trans_handle *trans;
        struct btrfs_log_ctx ctx;
        int ret = 0, err;
-       u64 len;
 
-       /*
-        * If the inode needs a full sync, make sure we use a full range to
-        * avoid log tree corruption, due to hole detection racing with ordered
-        * extent completion for adjacent ranges, and assertion failures during
-        * hole detection.
-        */
-       if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
-                    &BTRFS_I(inode)->runtime_flags)) {
-               start = 0;
-               end = LLONG_MAX;
-       }
-
-       /*
-        * The range length can be represented by u64, we have to do the typecasts
-        * to avoid signed overflow if it's [0, LLONG_MAX] eg. from fsync()
-        */
-       len = (u64)end - (u64)start + 1;
        trace_btrfs_sync_file(file, datasync);
 
        btrfs_init_log_ctx(&ctx, inode);
@@ -2101,6 +2093,19 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
 
        atomic_inc(&root->log_batch);
 
+       /*
+        * If the inode needs a full sync, make sure we use a full range to
+        * avoid log tree corruption, due to hole detection racing with ordered
+        * extent completion for adjacent ranges, and assertion failures during
+        * hole detection. Do this while holding the inode lock, to avoid races
+        * with other tasks.
+        */
+       if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
+                    &BTRFS_I(inode)->runtime_flags)) {
+               start = 0;
+               end = LLONG_MAX;
+       }
+
        /*
         * Before we acquired the inode's lock, someone may have dirtied more
         * pages in the target range. We need to make sure that writeback for
@@ -2128,8 +2133,11 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
        /*
         * We have to do this here to avoid the priority inversion of waiting on
         * IO of a lower priority task while holding a transaction open.
+        *
+        * Also, the range length can be represented by u64, we have to do the
+        * typecasts to avoid signed overflow if it's [0, LLONG_MAX].
         */
-       ret = btrfs_wait_ordered_range(inode, start, len);
+       ret = btrfs_wait_ordered_range(inode, start, (u64)end - (u64)start + 1);
        if (ret) {
                up_write(&BTRFS_I(inode)->dio_sem);
                inode_unlock(inode);
index 63cad7865d751dd96f2e364fdbd6c92c967a7c6e..37345fb6191d774aff9b8b4fbf57301afab83e98 100644 (file)
@@ -501,13 +501,13 @@ again:
        ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, prealloc,
                                              prealloc, prealloc, &alloc_hint);
        if (ret) {
-               btrfs_delalloc_release_extents(BTRFS_I(inode), prealloc, true);
+               btrfs_delalloc_release_extents(BTRFS_I(inode), prealloc);
                btrfs_delalloc_release_metadata(BTRFS_I(inode), prealloc, true);
                goto out_put;
        }
 
        ret = btrfs_write_out_ino_cache(root, trans, path, inode);
-       btrfs_delalloc_release_extents(BTRFS_I(inode), prealloc, false);
+       btrfs_delalloc_release_extents(BTRFS_I(inode), prealloc);
 out_put:
        iput(inode);
 out_release:
index a0546401bc0ab849e4bef60e2ad4ccc7441be6c0..c3f386b7cc0bd4d1b38fab14b56d1eec3c576d94 100644 (file)
@@ -2206,7 +2206,7 @@ again:
 
        ClearPageChecked(page);
        set_page_dirty(page);
-       btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE, false);
+       btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
 out:
        unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, page_end,
                             &cached_state);
@@ -4951,7 +4951,7 @@ again:
        if (!page) {
                btrfs_delalloc_release_space(inode, data_reserved,
                                             block_start, blocksize, true);
-               btrfs_delalloc_release_extents(BTRFS_I(inode), blocksize, true);
+               btrfs_delalloc_release_extents(BTRFS_I(inode), blocksize);
                ret = -ENOMEM;
                goto out;
        }
@@ -5018,7 +5018,7 @@ out_unlock:
        if (ret)
                btrfs_delalloc_release_space(inode, data_reserved, block_start,
                                             blocksize, true);
-       btrfs_delalloc_release_extents(BTRFS_I(inode), blocksize, (ret != 0));
+       btrfs_delalloc_release_extents(BTRFS_I(inode), blocksize);
        unlock_page(page);
        put_page(page);
 out:
@@ -6305,13 +6305,16 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
        u32 sizes[2];
        int nitems = name ? 2 : 1;
        unsigned long ptr;
+       unsigned int nofs_flag;
        int ret;
 
        path = btrfs_alloc_path();
        if (!path)
                return ERR_PTR(-ENOMEM);
 
+       nofs_flag = memalloc_nofs_save();
        inode = new_inode(fs_info->sb);
+       memalloc_nofs_restore(nofs_flag);
        if (!inode) {
                btrfs_free_path(path);
                return ERR_PTR(-ENOMEM);
@@ -8706,7 +8709,7 @@ static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
                } else if (ret >= 0 && (size_t)ret < count)
                        btrfs_delalloc_release_space(inode, data_reserved,
                                        offset, count - (size_t)ret, true);
-               btrfs_delalloc_release_extents(BTRFS_I(inode), count, false);
+               btrfs_delalloc_release_extents(BTRFS_I(inode), count);
        }
 out:
        if (wakeup)
@@ -9056,7 +9059,7 @@ again:
        unlock_extent_cached(io_tree, page_start, page_end, &cached_state);
 
        if (!ret2) {
-               btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE, true);
+               btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
                sb_end_pagefault(inode->i_sb);
                extent_changeset_free(data_reserved);
                return VM_FAULT_LOCKED;
@@ -9065,7 +9068,7 @@ again:
 out_unlock:
        unlock_page(page);
 out:
-       btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE, (ret != 0));
+       btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
        btrfs_delalloc_release_space(inode, data_reserved, page_start,
                                     reserved_space, (ret != 0));
 out_noreserve:
index de730e56d3f5106ee97d7fa316ee5e855487d954..7c145a41decd9aa135992031cdcf4c110e876a8e 100644 (file)
@@ -1360,8 +1360,7 @@ again:
                unlock_page(pages[i]);
                put_page(pages[i]);
        }
-       btrfs_delalloc_release_extents(BTRFS_I(inode), page_cnt << PAGE_SHIFT,
-                                      false);
+       btrfs_delalloc_release_extents(BTRFS_I(inode), page_cnt << PAGE_SHIFT);
        extent_changeset_free(data_reserved);
        return i_done;
 out:
@@ -1372,8 +1371,7 @@ out:
        btrfs_delalloc_release_space(inode, data_reserved,
                        start_index << PAGE_SHIFT,
                        page_cnt << PAGE_SHIFT, true);
-       btrfs_delalloc_release_extents(BTRFS_I(inode), page_cnt << PAGE_SHIFT,
-                                      true);
+       btrfs_delalloc_release_extents(BTRFS_I(inode), page_cnt << PAGE_SHIFT);
        extent_changeset_free(data_reserved);
        return ret;
 
index c4bb69941c77c721a7b6c69bb11d83938bfb69d2..3ad151655eb88680b75cfbda5c8005daf3d5d05a 100644 (file)
@@ -3629,7 +3629,7 @@ int __btrfs_qgroup_reserve_meta(struct btrfs_root *root, int num_bytes,
                return 0;
 
        BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize));
-       trace_qgroup_meta_reserve(root, type, (s64)num_bytes);
+       trace_qgroup_meta_reserve(root, (s64)num_bytes, type);
        ret = qgroup_reserve(root, num_bytes, enforce, type);
        if (ret < 0)
                return ret;
@@ -3676,7 +3676,7 @@ void __btrfs_qgroup_free_meta(struct btrfs_root *root, int num_bytes,
         */
        num_bytes = sub_root_meta_rsv(root, num_bytes, type);
        BUG_ON(num_bytes != round_down(num_bytes, fs_info->nodesize));
-       trace_qgroup_meta_reserve(root, type, -(s64)num_bytes);
+       trace_qgroup_meta_reserve(root, -(s64)num_bytes, type);
        btrfs_qgroup_free_refroot(fs_info, root->root_key.objectid,
                                  num_bytes, type);
 }
index e87cbdad02a37bddef15b3adac4330e5251970bc..b57f3618e58e305f5520dbf7f472df816180b8e4 100644 (file)
@@ -500,7 +500,7 @@ static int process_leaf(struct btrfs_root *root,
        struct btrfs_extent_data_ref *dref;
        struct btrfs_shared_data_ref *sref;
        u32 count;
-       int i = 0, tree_block_level = 0, ret;
+       int i = 0, tree_block_level = 0, ret = 0;
        struct btrfs_key key;
        int nritems = btrfs_header_nritems(leaf);
 
index 00504657b60225f5c00346f16098609db679f90e..5cd42b66818cd4ab3b935b809de32aca91a3fefc 100644 (file)
@@ -3277,6 +3277,8 @@ static int relocate_file_extent_cluster(struct inode *inode,
                        if (!page) {
                                btrfs_delalloc_release_metadata(BTRFS_I(inode),
                                                        PAGE_SIZE, true);
+                               btrfs_delalloc_release_extents(BTRFS_I(inode),
+                                                       PAGE_SIZE);
                                ret = -ENOMEM;
                                goto out;
                        }
@@ -3297,7 +3299,7 @@ static int relocate_file_extent_cluster(struct inode *inode,
                                btrfs_delalloc_release_metadata(BTRFS_I(inode),
                                                        PAGE_SIZE, true);
                                btrfs_delalloc_release_extents(BTRFS_I(inode),
-                                                              PAGE_SIZE, true);
+                                                              PAGE_SIZE);
                                ret = -EIO;
                                goto out;
                        }
@@ -3326,7 +3328,7 @@ static int relocate_file_extent_cluster(struct inode *inode,
                        btrfs_delalloc_release_metadata(BTRFS_I(inode),
                                                         PAGE_SIZE, true);
                        btrfs_delalloc_release_extents(BTRFS_I(inode),
-                                                      PAGE_SIZE, true);
+                                                      PAGE_SIZE);
 
                        clear_extent_bits(&BTRFS_I(inode)->io_tree,
                                          page_start, page_end,
@@ -3342,8 +3344,7 @@ static int relocate_file_extent_cluster(struct inode *inode,
                put_page(page);
 
                index++;
-               btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE,
-                                              false);
+               btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE);
                balance_dirty_pages_ratelimited(inode->i_mapping);
                btrfs_throttle(fs_info);
        }
index f3215028235c494c16d46b89c16733c7be4399d7..123ac54af071a8591769bbc833e65c653c355932 100644 (file)
@@ -5085,7 +5085,7 @@ static int clone_range(struct send_ctx *sctx,
        struct btrfs_path *path;
        struct btrfs_key key;
        int ret;
-       u64 clone_src_i_size;
+       u64 clone_src_i_size = 0;
 
        /*
         * Prevent cloning from a zero offset with a length matching the sector
index 29b82a7955227bc68daa7628ceeceff95e2204ee..8a6cc600bf183d4a9182915c3440924720e94fad 100644 (file)
@@ -2932,7 +2932,8 @@ out:
  * in the tree of log roots
  */
 static int update_log_root(struct btrfs_trans_handle *trans,
-                          struct btrfs_root *log)
+                          struct btrfs_root *log,
+                          struct btrfs_root_item *root_item)
 {
        struct btrfs_fs_info *fs_info = log->fs_info;
        int ret;
@@ -2940,10 +2941,10 @@ static int update_log_root(struct btrfs_trans_handle *trans,
        if (log->log_transid == 1) {
                /* insert root item on the first sync */
                ret = btrfs_insert_root(trans, fs_info->log_root_tree,
-                               &log->root_key, &log->root_item);
+                               &log->root_key, root_item);
        } else {
                ret = btrfs_update_root(trans, fs_info->log_root_tree,
-                               &log->root_key, &log->root_item);
+                               &log->root_key, root_item);
        }
        return ret;
 }
@@ -3041,6 +3042,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_root *log = root->log_root;
        struct btrfs_root *log_root_tree = fs_info->log_root_tree;
+       struct btrfs_root_item new_root_item;
        int log_transid = 0;
        struct btrfs_log_ctx root_log_ctx;
        struct blk_plug plug;
@@ -3104,17 +3106,25 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
                goto out;
        }
 
+       /*
+        * We _must_ update under the root->log_mutex in order to make sure we
+        * have a consistent view of the log root we are trying to commit at
+        * this moment.
+        *
+        * We _must_ copy this into a local copy, because we are not holding the
+        * log_root_tree->log_mutex yet.  This is important because when we
+        * commit the log_root_tree we must have a consistent view of the
+        * log_root_tree when we update the super block to point at the
+        * log_root_tree bytenr.  If we update the log_root_tree here we'll race
+        * with the commit and possibly point at the new block which we may not
+        * have written out.
+        */
        btrfs_set_root_node(&log->root_item, log->node);
+       memcpy(&new_root_item, &log->root_item, sizeof(new_root_item));
 
        root->log_transid++;
        log->log_transid = root->log_transid;
        root->log_start_pid = 0;
-       /*
-        * Update or create log root item under the root's log_mutex to prevent
-        * races with concurrent log syncs that can lead to failure to update
-        * log root item because it was not created yet.
-        */
-       ret = update_log_root(trans, log);
        /*
         * IO has been started, blocks of the log tree have WRITTEN flag set
         * in their headers. new modifications of the log will be written to
@@ -3135,6 +3145,14 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
        mutex_unlock(&log_root_tree->log_mutex);
 
        mutex_lock(&log_root_tree->log_mutex);
+
+       /*
+        * Now we are safe to update the log_root_tree because we're under the
+        * log_mutex, and we're a current writer so we're holding the commit
+        * open until we drop the log_mutex.
+        */
+       ret = update_log_root(trans, log, &new_root_item);
+
        if (atomic_dec_and_test(&log_root_tree->log_writers)) {
                /* atomic_dec_and_test implies a barrier */
                cond_wake_up_nomb(&log_root_tree->log_writer_wait);
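
The tree-log change above reduces to one pattern: take a consistent copy of the root item while holding root->log_mutex, then publish that copy later under log_root_tree->log_mutex, so the committed item can never point at blocks the commit has not yet written. A minimal userspace sketch of that copy-then-publish pattern, with illustrative names rather than the btrfs API:

    /* Copy under lock A, publish under lock B; stand-in types only. */
    #include <pthread.h>
    #include <stdio.h>
    #include <string.h>

    struct root_item {
            unsigned long long bytenr;
            unsigned long long gen;
    };

    static pthread_mutex_t log_mutex = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t log_root_tree_mutex = PTHREAD_MUTEX_INITIALIZER;
    static struct root_item live_item;       /* keeps changing under log_mutex */
    static struct root_item committed_item;  /* what gets pointed at on commit */

    static void sync_log(void)
    {
            struct root_item snapshot;

            pthread_mutex_lock(&log_mutex);
            memcpy(&snapshot, &live_item, sizeof(snapshot)); /* consistent copy */
            pthread_mutex_unlock(&log_mutex);

            /* ... log writeback runs here; live_item may move on ... */

            pthread_mutex_lock(&log_root_tree_mutex);
            committed_item = snapshot;  /* publish the copy, not live_item */
            pthread_mutex_unlock(&log_root_tree_mutex);
    }

    int main(void)
    {
            live_item.bytenr = 4096;
            live_item.gen = 1;
            sync_log();
            printf("committed bytenr=%llu gen=%llu\n",
                   committed_item.bytenr, committed_item.gen);
            return 0;
    }
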
index cdd7af424033c22bbc3932d49674c2d907c4def0..bdfe4493e43a4f437d674d35173098d473092ff4 100644 (file)
@@ -3845,7 +3845,11 @@ static int alloc_profile_is_valid(u64 flags, int extended)
                return !extended; /* "0" is valid for usual profiles */
 
        /* true if exactly one bit set */
-       return is_power_of_2(flags);
+       /*
+        * Don't use is_power_of_2(unsigned long) because it won't work
+        * for the single profile (1ULL << 48) on 32-bit CPUs.
+        */
+       return flags != 0 && (flags & (flags - 1)) == 0;
 }
 
 static inline int balance_need_close(struct btrfs_fs_info *fs_info)
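
The open-coded test above exists because is_power_of_2() takes an unsigned long, which is only 32 bits wide on 32-bit architectures, so a 64-bit profile bit would be truncated before the check. A rough userspace illustration (the 1ULL << 48 value simply mirrors the bit mentioned in the comment):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* true iff exactly one bit is set; safe for the full 64-bit range */
    static bool single_bit_set_u64(uint64_t flags)
    {
            return flags != 0 && (flags & (flags - 1)) == 0;
    }

    int main(void)
    {
            uint64_t profile = 1ULL << 48;
            /* on a 32-bit build, unsigned long truncates this to 0 and a
             * power-of-two helper taking unsigned long would reject it */
            unsigned long truncated = (unsigned long)profile;

            printf("64-bit single-bit check: %d\n", single_bit_set_u64(profile));
            printf("value seen through unsigned long: %lu\n", truncated);
            return 0;
    }
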
index a8a8f84f3bbfae462ba8001faa0dac7721ce883f..a5163296d9d9b10518a1942ee95c2a96f2301cf6 100644 (file)
@@ -384,8 +384,8 @@ static int parse_reply_info_readdir(void **p, void *end,
        }
 
 done:
-       if (*p != end)
-               goto bad;
+       /* Skip over any unrecognized fields */
+       *p = end;
        return 0;
 
 bad:
@@ -406,12 +406,10 @@ static int parse_reply_info_filelock(void **p, void *end,
                goto bad;
 
        info->filelock_reply = *p;
-       *p += sizeof(*info->filelock_reply);
 
-       if (unlikely(*p != end))
-               goto bad;
+       /* Skip over any unrecognized fields */
+       *p = end;
        return 0;
-
 bad:
        return -EIO;
 }
@@ -425,18 +423,21 @@ static int parse_reply_info_create(void **p, void *end,
 {
        if (features == (u64)-1 ||
            (features & CEPH_FEATURE_REPLY_CREATE_INODE)) {
+               /* Malformed reply? */
                if (*p == end) {
                        info->has_create_ino = false;
                } else {
                        info->has_create_ino = true;
-                       info->ino = ceph_decode_64(p);
+                       ceph_decode_64_safe(p, end, info->ino, bad);
                }
+       } else {
+               if (*p != end)
+                       goto bad;
        }
 
-       if (unlikely(*p != end))
-               goto bad;
+       /* Skip over any unrecognized fields */
+       *p = end;
        return 0;
-
 bad:
        return -EIO;
 }
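
The ceph decoder changes all enforce the same rule: never read past end, and treat trailing bytes the client does not understand as fields to skip rather than as corruption. A hedged userspace sketch of a bounds-checked 64-bit decode in that spirit (decode_u64_safe is an invented name, not the ceph_decode_64_safe macro itself):

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Fail with -EIO if fewer than 8 bytes remain; ceph wire integers are
     * little-endian, so assemble the value byte by byte. */
    static int decode_u64_safe(const uint8_t **p, const uint8_t *end, uint64_t *val)
    {
            uint64_t v = 0;
            int i;

            if (end - *p < 8)
                    return -EIO;    /* malformed reply */
            for (i = 0; i < 8; i++)
                    v |= (uint64_t)(*p)[i] << (8 * i);
            *val = v;
            *p += 8;
            return 0;
    }

    int main(void)
    {
            uint8_t buf[12] = { 1, 0, 0, 0, 0, 0, 0, 0,     /* ino = 1 */
                                0xaa, 0xbb, 0xcc, 0xdd };   /* unknown field */
            const uint8_t *p = buf, *end = buf + sizeof(buf);
            uint64_t ino;

            if (decode_u64_safe(&p, end, &ino))
                    return 1;
            printf("ino=%llu, skipping %td unrecognized bytes\n",
                   (unsigned long long)ino, end - p);
            p = end;        /* skip over any unrecognized fields */
            return 0;
    }
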
index 2e9c7f493f99ad649c659e2460d9e6ae4654cb66..1a135d1b85bd8c3dca017660d1c786a4d4578280 100644 (file)
@@ -169,18 +169,32 @@ cifs_read_super(struct super_block *sb)
        else
                sb->s_maxbytes = MAX_NON_LFS;
 
-       /* BB FIXME fix time_gran to be larger for LANMAN sessions */
-       sb->s_time_gran = 100;
-
-       if (tcon->unix_ext) {
-               ts = cifs_NTtimeToUnix(0);
+       /*
+        * Some very old servers like DOS and OS/2 used 2 second granularity
+        * (while all current servers use 100ns granularity - see MS-DTYP)
+        * but 1 second is the maximum allowed granularity for the VFS
+        * so for old servers set time granularity to 1 second while for
+        * everything else (current servers) set it to 100ns.
+        */
+       if ((tcon->ses->server->vals->protocol_id == SMB10_PROT_ID) &&
+           ((tcon->ses->capabilities &
+             tcon->ses->server->vals->cap_nt_find) == 0) &&
+           !tcon->unix_ext) {
+               sb->s_time_gran = 1000000000; /* 1 second is max allowed gran */
+               ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MIN), 0, 0);
                sb->s_time_min = ts.tv_sec;
-               ts = cifs_NTtimeToUnix(cpu_to_le64(S64_MAX));
+               ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MAX),
+                                   cpu_to_le16(SMB_TIME_MAX), 0);
                sb->s_time_max = ts.tv_sec;
        } else {
-               ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MIN), 0, 0);
+               /*
+                * Almost every server, including all SMB2+, uses DCE TIME
+                * ie 100 nanosecond units, since 1601.  See MS-DTYP and MS-FSCC
+                */
+               sb->s_time_gran = 100;
+               ts = cifs_NTtimeToUnix(0);
                sb->s_time_min = ts.tv_sec;
-               ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MAX), cpu_to_le16(SMB_TIME_MAX), 0);
+               ts = cifs_NTtimeToUnix(cpu_to_le64(S64_MAX));
                sb->s_time_max = ts.tv_sec;
        }
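
The 100ns branch above relies on the standard FILETIME arithmetic: the server supplies 100-nanosecond ticks counted from 1601-01-01, and that epoch sits 11,644,473,600 seconds before the Unix epoch. A small sketch of the conversion, assuming a 64-bit time_t (nt_time_to_unix is an illustrative name, not the cifs_NTtimeToUnix implementation):

    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    #define NT_EPOCH_DELTA_SECS 11644473600LL   /* 1601-01-01 .. 1970-01-01 */
    #define TICKS_PER_SEC       10000000LL      /* 100ns ticks per second */

    static struct timespec nt_time_to_unix(uint64_t nt_ticks)
    {
            struct timespec ts;
            int64_t secs = (int64_t)(nt_ticks / TICKS_PER_SEC) - NT_EPOCH_DELTA_SECS;

            ts.tv_sec = (time_t)secs;
            ts.tv_nsec = (long)(nt_ticks % TICKS_PER_SEC) * 100;
            return ts;
    }

    int main(void)
    {
            /* NT time 0 is 1601-01-01, i.e. a large negative Unix timestamp */
            struct timespec min = nt_time_to_unix(0);

            printf("s_time_min would be %lld\n", (long long)min.tv_sec);
            return 0;
    }
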
 
index 2e960e1049db565d4f8a64a731329f74f8cc5404..d78bfcc191560804b694b965a381b21f7b5962f2 100644 (file)
@@ -1210,7 +1210,7 @@ struct cifs_search_info {
        bool smallBuf:1; /* so we know which buf_release function to call */
 };
 
-#define ACL_NO_MODE    -1
+#define ACL_NO_MODE    ((umode_t)(-1))
 struct cifs_open_parms {
        struct cifs_tcon *tcon;
        struct cifs_sb_info *cifs_sb;
@@ -1391,6 +1391,11 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file);
 struct cifsInodeInfo {
        bool can_cache_brlcks;
        struct list_head llist; /* locks held by this inode */
+       /*
+        * NOTE: Some code paths call down_read(lock_sem) twice, so
+        * we must always use cifs_down_write() instead of down_write()
+        * for this semaphore to avoid deadlocks.
+        */
        struct rw_semaphore lock_sem;   /* protect the fields above */
        /* BB add in lists for dirty pages i.e. write caching info for oplock */
        struct list_head openFileList;
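
The ACL_NO_MODE change earlier in this header is a promotion issue: umode_t is an unsigned type narrower than int (unsigned short in the kernel), so comparing it against a bare -1 promotes both operands to int and can never match. A standalone illustration of why the cast in the macro is needed:

    #include <stdio.h>

    typedef unsigned short umode_t;     /* mirrors the kernel typedef */

    #define ACL_NO_MODE ((umode_t)(-1))

    int main(void)
    {
            umode_t mode = (umode_t)-1;  /* "no mode supplied" sentinel */

            /* 65535 == -1 after promotion to int: always false */
            printf("mode == -1          : %d\n", mode == -1);
            /* 65535 == 65535: true, which is what the callers expect */
            printf("mode == ACL_NO_MODE : %d\n", mode == ACL_NO_MODE);
            return 0;
    }
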
index e53e9f62b87b7b043f11010877ab59c5937c7014..fe597d3d52088366615c5ec5d295ee7218f4d9ed 100644 (file)
@@ -170,6 +170,7 @@ extern int cifs_unlock_range(struct cifsFileInfo *cfile,
                             struct file_lock *flock, const unsigned int xid);
 extern int cifs_push_mandatory_locks(struct cifsFileInfo *cfile);
 
+extern void cifs_down_write(struct rw_semaphore *sem);
 extern struct cifsFileInfo *cifs_new_fileinfo(struct cifs_fid *fid,
                                              struct file *file,
                                              struct tcon_link *tlink,
index 2850c3ce43919c4581be4818e29f905e45512b9a..ccaa8bad336f9d7e2b67cb1364a4ca4b6f1718d4 100644 (file)
@@ -564,9 +564,11 @@ cifs_reconnect(struct TCP_Server_Info *server)
        spin_lock(&GlobalMid_Lock);
        list_for_each_safe(tmp, tmp2, &server->pending_mid_q) {
                mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
+               kref_get(&mid_entry->refcount);
                if (mid_entry->mid_state == MID_REQUEST_SUBMITTED)
                        mid_entry->mid_state = MID_RETRY_NEEDED;
                list_move(&mid_entry->qhead, &retry_list);
+               mid_entry->mid_flags |= MID_DELETED;
        }
        spin_unlock(&GlobalMid_Lock);
        mutex_unlock(&server->srv_mutex);
@@ -576,6 +578,7 @@ cifs_reconnect(struct TCP_Server_Info *server)
                mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
                list_del_init(&mid_entry->qhead);
                mid_entry->callback(mid_entry);
+               cifs_mid_q_entry_release(mid_entry);
        }
 
        if (cifs_rdma_enabled(server)) {
@@ -895,8 +898,10 @@ dequeue_mid(struct mid_q_entry *mid, bool malformed)
        if (mid->mid_flags & MID_DELETED)
                printk_once(KERN_WARNING
                            "trying to dequeue a deleted mid\n");
-       else
+       else {
                list_del_init(&mid->qhead);
+               mid->mid_flags |= MID_DELETED;
+       }
        spin_unlock(&GlobalMid_Lock);
 }
 
@@ -966,8 +971,10 @@ static void clean_demultiplex_info(struct TCP_Server_Info *server)
                list_for_each_safe(tmp, tmp2, &server->pending_mid_q) {
                        mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
                        cifs_dbg(FYI, "Clearing mid 0x%llx\n", mid_entry->mid);
+                       kref_get(&mid_entry->refcount);
                        mid_entry->mid_state = MID_SHUTDOWN;
                        list_move(&mid_entry->qhead, &dispose_list);
+                       mid_entry->mid_flags |= MID_DELETED;
                }
                spin_unlock(&GlobalMid_Lock);
 
@@ -977,6 +984,7 @@ static void clean_demultiplex_info(struct TCP_Server_Info *server)
                        cifs_dbg(FYI, "Callback mid 0x%llx\n", mid_entry->mid);
                        list_del_init(&mid_entry->qhead);
                        mid_entry->callback(mid_entry);
+                       cifs_mid_q_entry_release(mid_entry);
                }
                /* 1/8th of sec is more than enough time for them to exit */
                msleep(125);
@@ -3882,8 +3890,12 @@ generic_ip_connect(struct TCP_Server_Info *server)
 
        rc = socket->ops->connect(socket, saddr, slen,
                                  server->noblockcnt ? O_NONBLOCK : 0);
-
-       if (rc == -EINPROGRESS)
+       /*
+        * When mounting SMB root file systems, we do not want to block in
+        * connect. Otherwise bail out and then let cifs_reconnect() perform
+        * reconnect failover - if possible.
+        */
+       if (server->noblockcnt && rc == -EINPROGRESS)
                rc = 0;
        if (rc < 0) {
                cifs_dbg(FYI, "Error %d connecting to server\n", rc);
@@ -4264,7 +4276,7 @@ static int mount_get_conns(struct smb_vol *vol, struct cifs_sb_info *cifs_sb,
                server->ops->qfs_tcon(*xid, tcon);
                if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RO_CACHE) {
                        if (tcon->fsDevInfo.DeviceCharacteristics &
-                           FILE_READ_ONLY_DEVICE)
+                           cpu_to_le32(FILE_READ_ONLY_DEVICE))
                                cifs_dbg(VFS, "mounted to read only share\n");
                        else if ((cifs_sb->mnt_cifs_flags &
                                  CIFS_MOUNT_RW_CACHE) == 0)
@@ -4445,7 +4457,7 @@ static int setup_dfs_tgt_conn(const char *path,
        int rc;
        struct dfs_info3_param ref = {0};
        char *mdata = NULL, *fake_devname = NULL;
-       struct smb_vol fake_vol = {0};
+       struct smb_vol fake_vol = {NULL};
 
        cifs_dbg(FYI, "%s: dfs path: %s\n", __func__, path);
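
The kref_get()/cifs_mid_q_entry_release() pairs added above follow a single pattern: pin each mid while it sits on a private retry or dispose list, invoke its callback outside the lock, and only then drop the pin, so a callback that releases its own reference cannot free the entry early. A condensed userspace sketch with a plain counter standing in for the kref and illustrative names throughout:

    #include <stdio.h>
    #include <stdlib.h>

    struct mid_entry {
            int refcount;
            int id;
    };

    static void mid_get(struct mid_entry *m) { m->refcount++; }

    static void mid_put(struct mid_entry *m)
    {
            if (--m->refcount == 0)
                    free(m);
    }

    int main(void)
    {
            struct mid_entry *mid = calloc(1, sizeof(*mid));
            struct mid_entry *retry_list;

            if (!mid)
                    return 1;
            mid->refcount = 1;      /* reference held by the pending list */
            mid->id = 42;

            /* under the lock: take an extra ref and move to the private list */
            mid_get(mid);
            retry_list = mid;

            /* outside the lock: the callback may drop the list's reference */
            printf("retrying mid %d\n", retry_list->id);
            mid_put(retry_list);    /* callback's put */
            mid_put(retry_list);    /* drop the pin we took above */
            return 0;
    }
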
 
index dd5ac841aefa7fa45a9fe63cc24d1d07de4dd593..7ce689d31aa2d446e27cfe20ab2ab4bb79d3ac2e 100644 (file)
@@ -738,10 +738,16 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
 static int
 cifs_d_revalidate(struct dentry *direntry, unsigned int flags)
 {
+       struct inode *inode;
+
        if (flags & LOOKUP_RCU)
                return -ECHILD;
 
        if (d_really_is_positive(direntry)) {
+               inode = d_inode(direntry);
+               if ((flags & LOOKUP_REVAL) && !CIFS_CACHE_READ(CIFS_I(inode)))
+                       CIFS_I(inode)->time = 0; /* force reval */
+
                if (cifs_revalidate_dentry(direntry))
                        return 0;
                else {
@@ -752,7 +758,7 @@ cifs_d_revalidate(struct dentry *direntry, unsigned int flags)
                         * attributes will have been updated by
                         * cifs_revalidate_dentry().
                         */
-                       if (IS_AUTOMOUNT(d_inode(direntry)) &&
+                       if (IS_AUTOMOUNT(inode) &&
                           !(direntry->d_flags & DCACHE_NEED_AUTOMOUNT)) {
                                spin_lock(&direntry->d_lock);
                                direntry->d_flags |= DCACHE_NEED_AUTOMOUNT;
index 4b95700c507c793abc9e594ca4ea4aeb42a2ceb7..fa7b0fa72bb360d7133b13b05788e1e591faf547 100644 (file)
@@ -253,6 +253,12 @@ cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
                rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
                                         xid, fid);
 
+       if (rc) {
+               server->ops->close(xid, tcon, fid);
+               if (rc == -ESTALE)
+                       rc = -EOPENSTALE;
+       }
+
 out:
        kfree(buf);
        return rc;
@@ -275,6 +281,13 @@ cifs_has_mand_locks(struct cifsInodeInfo *cinode)
        return has_locks;
 }
 
+void
+cifs_down_write(struct rw_semaphore *sem)
+{
+       while (!down_write_trylock(sem))
+               msleep(10);
+}
+
 struct cifsFileInfo *
 cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
                  struct tcon_link *tlink, __u32 oplock)
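
cifs_down_write() above polls for the write lock instead of sleeping in the rwsem's writer queue, which sidesteps the deadlock noted in cifsglob.h where a path already holding lock_sem for read takes it again behind a queued writer. A userspace analogue of the polling helper using a pthread rwlock (a sketch of the idea, not the kernel locking semantics):

    #include <pthread.h>
    #include <unistd.h>

    /* Keep retrying the write lock; never park behind queued writers. */
    static void polled_write_lock(pthread_rwlock_t *sem)
    {
            while (pthread_rwlock_trywrlock(sem) != 0)
                    usleep(10000);          /* roughly mirrors msleep(10) */
    }

    int main(void)
    {
            pthread_rwlock_t lock_sem = PTHREAD_RWLOCK_INITIALIZER;

            polled_write_lock(&lock_sem);
            /* ... manipulate the per-inode lock list here ... */
            pthread_rwlock_unlock(&lock_sem);
            return 0;
    }
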
@@ -300,7 +313,7 @@ cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
        INIT_LIST_HEAD(&fdlocks->locks);
        fdlocks->cfile = cfile;
        cfile->llist = fdlocks;
-       down_write(&cinode->lock_sem);
+       cifs_down_write(&cinode->lock_sem);
        list_add(&fdlocks->llist, &cinode->llist);
        up_write(&cinode->lock_sem);
 
@@ -399,10 +412,11 @@ void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_handler)
        bool oplock_break_cancelled;
 
        spin_lock(&tcon->open_file_lock);
-
+       spin_lock(&cifsi->open_file_lock);
        spin_lock(&cifs_file->file_info_lock);
        if (--cifs_file->count > 0) {
                spin_unlock(&cifs_file->file_info_lock);
+               spin_unlock(&cifsi->open_file_lock);
                spin_unlock(&tcon->open_file_lock);
                return;
        }
@@ -415,9 +429,7 @@ void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_handler)
        cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open);
 
        /* remove it from the lists */
-       spin_lock(&cifsi->open_file_lock);
        list_del(&cifs_file->flist);
-       spin_unlock(&cifsi->open_file_lock);
        list_del(&cifs_file->tlist);
        atomic_dec(&tcon->num_local_opens);
 
@@ -434,6 +446,7 @@ void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_handler)
                cifs_set_oplock_level(cifsi, 0);
        }
 
+       spin_unlock(&cifsi->open_file_lock);
        spin_unlock(&tcon->open_file_lock);
 
        oplock_break_cancelled = wait_oplock_handler ?
@@ -458,7 +471,7 @@ void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_handler)
         * Delete any outstanding lock records. We'll lose them when the file
         * is closed anyway.
         */
-       down_write(&cifsi->lock_sem);
+       cifs_down_write(&cifsi->lock_sem);
        list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
                list_del(&li->llist);
                cifs_del_lock_waiters(li);
@@ -1021,7 +1034,7 @@ static void
 cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
 {
        struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
-       down_write(&cinode->lock_sem);
+       cifs_down_write(&cinode->lock_sem);
        list_add_tail(&lock->llist, &cfile->llist->locks);
        up_write(&cinode->lock_sem);
 }
@@ -1043,7 +1056,7 @@ cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
 
 try_again:
        exist = false;
-       down_write(&cinode->lock_sem);
+       cifs_down_write(&cinode->lock_sem);
 
        exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
                                        lock->type, lock->flags, &conf_lock,
@@ -1066,7 +1079,7 @@ try_again:
                                        (lock->blist.next == &lock->blist));
                if (!rc)
                        goto try_again;
-               down_write(&cinode->lock_sem);
+               cifs_down_write(&cinode->lock_sem);
                list_del_init(&lock->blist);
        }
 
@@ -1119,7 +1132,7 @@ cifs_posix_lock_set(struct file *file, struct file_lock *flock)
                return rc;
 
 try_again:
-       down_write(&cinode->lock_sem);
+       cifs_down_write(&cinode->lock_sem);
        if (!cinode->can_cache_brlcks) {
                up_write(&cinode->lock_sem);
                return rc;
@@ -1325,7 +1338,7 @@ cifs_push_locks(struct cifsFileInfo *cfile)
        int rc = 0;
 
        /* we are going to update can_cache_brlcks here - need a write access */
-       down_write(&cinode->lock_sem);
+       cifs_down_write(&cinode->lock_sem);
        if (!cinode->can_cache_brlcks) {
                up_write(&cinode->lock_sem);
                return rc;
@@ -1516,7 +1529,7 @@ cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
        if (!buf)
                return -ENOMEM;
 
-       down_write(&cinode->lock_sem);
+       cifs_down_write(&cinode->lock_sem);
        for (i = 0; i < 2; i++) {
                cur = buf;
                num = 0;
@@ -1840,13 +1853,12 @@ struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
 {
        struct cifsFileInfo *open_file = NULL;
        struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
-       struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
 
        /* only filter by fsuid on multiuser mounts */
        if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
                fsuid_only = false;
 
-       spin_lock(&tcon->open_file_lock);
+       spin_lock(&cifs_inode->open_file_lock);
        /* we could simply get the first_list_entry since write-only entries
           are always at the end of the list but since the first entry might
           have a close pending, we go through the whole list */
@@ -1858,7 +1870,7 @@ struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
                                /* found a good file */
                                /* lock it so it will not be closed on us */
                                cifsFileInfo_get(open_file);
-                               spin_unlock(&tcon->open_file_lock);
+                               spin_unlock(&cifs_inode->open_file_lock);
                                return open_file;
                        } /* else might as well continue, and look for
                             another, or simply have the caller reopen it
@@ -1866,7 +1878,7 @@ struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
                } else /* write only file */
                        break; /* write only files are last so must be done */
        }
-       spin_unlock(&tcon->open_file_lock);
+       spin_unlock(&cifs_inode->open_file_lock);
        return NULL;
 }
 
@@ -1877,7 +1889,6 @@ cifs_get_writable_file(struct cifsInodeInfo *cifs_inode, bool fsuid_only,
 {
        struct cifsFileInfo *open_file, *inv_file = NULL;
        struct cifs_sb_info *cifs_sb;
-       struct cifs_tcon *tcon;
        bool any_available = false;
        int rc = -EBADF;
        unsigned int refind = 0;
@@ -1897,16 +1908,15 @@ cifs_get_writable_file(struct cifsInodeInfo *cifs_inode, bool fsuid_only,
        }
 
        cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
-       tcon = cifs_sb_master_tcon(cifs_sb);
 
        /* only filter by fsuid on multiuser mounts */
        if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
                fsuid_only = false;
 
-       spin_lock(&tcon->open_file_lock);
+       spin_lock(&cifs_inode->open_file_lock);
 refind_writable:
        if (refind > MAX_REOPEN_ATT) {
-               spin_unlock(&tcon->open_file_lock);
+               spin_unlock(&cifs_inode->open_file_lock);
                return rc;
        }
        list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
@@ -1918,7 +1928,7 @@ refind_writable:
                        if (!open_file->invalidHandle) {
                                /* found a good writable file */
                                cifsFileInfo_get(open_file);
-                               spin_unlock(&tcon->open_file_lock);
+                               spin_unlock(&cifs_inode->open_file_lock);
                                *ret_file = open_file;
                                return 0;
                        } else {
@@ -1938,7 +1948,7 @@ refind_writable:
                cifsFileInfo_get(inv_file);
        }
 
-       spin_unlock(&tcon->open_file_lock);
+       spin_unlock(&cifs_inode->open_file_lock);
 
        if (inv_file) {
                rc = cifs_reopen_file(inv_file, false);
@@ -1953,7 +1963,7 @@ refind_writable:
                cifsFileInfo_put(inv_file);
                ++refind;
                inv_file = NULL;
-               spin_lock(&tcon->open_file_lock);
+               spin_lock(&cifs_inode->open_file_lock);
                goto refind_writable;
        }
 
@@ -4461,17 +4471,15 @@ static int cifs_readpage(struct file *file, struct page *page)
 static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
 {
        struct cifsFileInfo *open_file;
-       struct cifs_tcon *tcon =
-               cifs_sb_master_tcon(CIFS_SB(cifs_inode->vfs_inode.i_sb));
 
-       spin_lock(&tcon->open_file_lock);
+       spin_lock(&cifs_inode->open_file_lock);
        list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
                if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
-                       spin_unlock(&tcon->open_file_lock);
+                       spin_unlock(&cifs_inode->open_file_lock);
                        return 1;
                }
        }
-       spin_unlock(&tcon->open_file_lock);
+       spin_unlock(&cifs_inode->open_file_lock);
        return 0;
 }
 
index 3bae2e53f0b85fe3b78885d34e12070ce57cfb53..df9377828e2f435bdb32339f48c2ad08ac9bdd32 100644 (file)
@@ -414,6 +414,7 @@ int cifs_get_inode_info_unix(struct inode **pinode,
                /* if uniqueid is different, return error */
                if (unlikely(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM &&
                    CIFS_I(*pinode)->uniqueid != fattr.cf_uniqueid)) {
+                       CIFS_I(*pinode)->time = 0; /* force reval */
                        rc = -ESTALE;
                        goto cgiiu_exit;
                }
@@ -421,6 +422,7 @@ int cifs_get_inode_info_unix(struct inode **pinode,
                /* if filetype is different, return error */
                if (unlikely(((*pinode)->i_mode & S_IFMT) !=
                    (fattr.cf_mode & S_IFMT))) {
+                       CIFS_I(*pinode)->time = 0; /* force reval */
                        rc = -ESTALE;
                        goto cgiiu_exit;
                }
@@ -933,6 +935,7 @@ cifs_get_inode_info(struct inode **inode, const char *full_path,
                /* if uniqueid is different, return error */
                if (unlikely(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM &&
                    CIFS_I(*inode)->uniqueid != fattr.cf_uniqueid)) {
+                       CIFS_I(*inode)->time = 0; /* force reval */
                        rc = -ESTALE;
                        goto cgii_exit;
                }
@@ -940,6 +943,7 @@ cifs_get_inode_info(struct inode **inode, const char *full_path,
                /* if filetype is different, return error */
                if (unlikely(((*inode)->i_mode & S_IFMT) !=
                    (fattr.cf_mode & S_IFMT))) {
+                       CIFS_I(*inode)->time = 0; /* force reval */
                        rc = -ESTALE;
                        goto cgii_exit;
                }
@@ -2471,9 +2475,9 @@ cifs_setattr_nounix(struct dentry *direntry, struct iattr *attrs)
                        rc = tcon->ses->server->ops->flush(xid, tcon, &wfile->fid);
                        cifsFileInfo_put(wfile);
                        if (rc)
-                               return rc;
+                               goto cifs_setattr_exit;
                } else if (rc != -EBADF)
-                       return rc;
+                       goto cifs_setattr_exit;
                else
                        rc = 0;
        }
index 49c17ee182544d6f986334379d76976b6f3ef3b3..9b41436fb8dbb3d98337a10cd4b3bfc8d7135647 100644 (file)
@@ -117,10 +117,6 @@ static const struct smb_to_posix_error mapping_table_ERRSRV[] = {
        {0, 0}
 };
 
-static const struct smb_to_posix_error mapping_table_ERRHRD[] = {
-       {0, 0}
-};
-
 /*
  * Convert a string containing text IPv4 or IPv6 address to binary form.
  *
index b7421a0963195137474d9e3a15075796743c6157..514810694c0f5a7b48bfa6139401beb8c40c68cf 100644 (file)
@@ -171,6 +171,9 @@ cifs_get_next_mid(struct TCP_Server_Info *server)
        /* we do not want to loop forever */
        last_mid = cur_mid;
        cur_mid++;
+       /* avoid 0xFFFF MID */
+       if (cur_mid == 0xffff)
+               cur_mid++;
 
        /*
         * This nested loop looks more expensive than it is.
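
A stripped-down model of the allocator after the change: the 16-bit counter wraps, but never hands out 0xFFFF (skipping 0 as well is an extra assumption in this sketch, not taken from the hunk):

    #include <stdint.h>
    #include <stdio.h>

    static uint16_t cur_mid;

    static uint16_t get_next_mid(void)
    {
            do {
                    cur_mid++;              /* wraps naturally at 16 bits */
            } while (cur_mid == 0 || cur_mid == 0xFFFF);
            return cur_mid;
    }

    int main(void)
    {
            cur_mid = 0xFFFD;
            printf("%#x\n", (unsigned)get_next_mid());  /* 0xfffe */
            printf("%#x\n", (unsigned)get_next_mid());  /* 0x1: 0xffff and 0 skipped */
            printf("%#x\n", (unsigned)get_next_mid());  /* 0x2 */
            return 0;
    }
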
index e6a1fc72018fd1821f8499a70eebe6edd7400bdc..8b0b512c57920cebd5a022c2357a31e376cf87ed 100644 (file)
@@ -145,7 +145,7 @@ smb2_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
 
        cur = buf;
 
-       down_write(&cinode->lock_sem);
+       cifs_down_write(&cinode->lock_sem);
        list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
                if (flock->fl_start > li->offset ||
                    (flock->fl_start + length) <
index 4c092259646740da18a7e3be2b31d1647320aaa4..cd55af9b7cc5aca5669fb53018efb0ea29494bff 100644 (file)
@@ -4084,6 +4084,7 @@ free_pages:
 
        kfree(dw->ppages);
        cifs_small_buf_release(dw->buf);
+       kfree(dw);
 }
 
 
@@ -4157,7 +4158,7 @@ receive_encrypted_read(struct TCP_Server_Info *server, struct mid_q_entry **mid,
                dw->server = server;
                dw->ppages = pages;
                dw->len = len;
-               queue_work(cifsiod_wq, &dw->decrypt);
+               queue_work(decrypt_wq, &dw->decrypt);
                *num_mids = 0; /* worker thread takes care of finding mid */
                return -1;
        }
index 85f9d614d96874c77b88fa7d5bfb2c58c6a1c079..05149862aea42e6a33736fe77856d8a822af4480 100644 (file)
@@ -751,8 +751,8 @@ add_posix_context(struct kvec *iov, unsigned int *num_iovec, umode_t mode)
        unsigned int num = *num_iovec;
 
        iov[num].iov_base = create_posix_buf(mode);
-       if (mode == -1)
-               cifs_dbg(VFS, "illegal mode\n"); /* BB REMOVEME */
+       if (mode == ACL_NO_MODE)
+               cifs_dbg(FYI, "illegal mode\n");
        if (iov[num].iov_base == NULL)
                return -ENOMEM;
        iov[num].iov_len = sizeof(struct create_posix);
@@ -2521,11 +2521,8 @@ SMB2_open_init(struct cifs_tcon *tcon, struct smb_rqst *rqst, __u8 *oplock,
                        return rc;
        }
 
-       /* TODO: add handling for the mode on create */
-       if (oparms->disposition == FILE_CREATE)
-               cifs_dbg(VFS, "mode is 0x%x\n", oparms->mode); /* BB REMOVEME */
-
-       if ((oparms->disposition == FILE_CREATE) && (oparms->mode != -1)) {
+       if ((oparms->disposition == FILE_CREATE) &&
+           (oparms->mode != ACL_NO_MODE)) {
                if (n_iov > 2) {
                        struct create_context *ccontext =
                            (struct create_context *)iov[n_iov-1].iov_base;
@@ -3217,7 +3214,8 @@ SMB2_notify_init(const unsigned int xid, struct smb_rqst *rqst,
 
        req->PersistentFileId = persistent_fid;
        req->VolatileFileId = volatile_fid;
-       req->OutputBufferLength = SMB2_MAX_BUFFER_SIZE - MAX_SMB2_HDR_SIZE;
+       req->OutputBufferLength =
+               cpu_to_le32(SMB2_MAX_BUFFER_SIZE - MAX_SMB2_HDR_SIZE);
        req->CompletionFilter = cpu_to_le32(completion_filter);
        if (watch_tree)
                req->Flags = cpu_to_le16(SMB2_WATCH_TREE);
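
The OutputBufferLength fix above is an endianness fix: SMB2 fields are little-endian on the wire, so CPU-order values have to be converted before they are sent, exactly what the surrounding cpu_to_le32()/cpu_to_le16() calls do. A tiny host-independent serializer makes the point (the length value is purely illustrative):

    #include <stdint.h>
    #include <stdio.h>

    /* Write a 32-bit value in little-endian byte order regardless of host. */
    static void put_le32(uint8_t *dst, uint32_t v)
    {
            dst[0] = v & 0xff;
            dst[1] = (v >> 8) & 0xff;
            dst[2] = (v >> 16) & 0xff;
            dst[3] = (v >> 24) & 0xff;
    }

    int main(void)
    {
            uint8_t wire[4];
            uint32_t output_buffer_length = 65536 - 64;  /* illustrative only */

            put_le32(wire, output_buffer_length);
            printf("%02x %02x %02x %02x\n", wire[0], wire[1], wire[2], wire[3]);
            return 0;
    }
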
index da3a6d58080876d7912e031d8ab42f7db13b8f7b..71b2930b8e0b89d9301251b43dea35fab01a9619 100644 (file)
@@ -150,6 +150,10 @@ extern int SMB2_ioctl_init(struct cifs_tcon *tcon, struct smb_rqst *rqst,
                           bool is_fsctl, char *in_data, u32 indatalen,
                           __u32 max_response_size);
 extern void SMB2_ioctl_free(struct smb_rqst *rqst);
+extern int SMB2_change_notify(const unsigned int xid, struct cifs_tcon *tcon,
+                       u64 persistent_fid, u64 volatile_fid, bool watch_tree,
+                       u32 completion_filter);
+
 extern int SMB2_close(const unsigned int xid, struct cifs_tcon *tcon,
                      u64 persistent_file_id, u64 volatile_file_id);
 extern int SMB2_close_flags(const unsigned int xid, struct cifs_tcon *tcon,
index 308ad0f495e10d513f9219e92d8ade62558337f6..ca3de62688d6e8fab18504ef09cc4be5ac9c709a 100644 (file)
@@ -86,22 +86,8 @@ AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
 
 static void _cifs_mid_q_entry_release(struct kref *refcount)
 {
-       struct mid_q_entry *mid = container_of(refcount, struct mid_q_entry,
-                                              refcount);
-
-       mempool_free(mid, cifs_mid_poolp);
-}
-
-void cifs_mid_q_entry_release(struct mid_q_entry *midEntry)
-{
-       spin_lock(&GlobalMid_Lock);
-       kref_put(&midEntry->refcount, _cifs_mid_q_entry_release);
-       spin_unlock(&GlobalMid_Lock);
-}
-
-void
-DeleteMidQEntry(struct mid_q_entry *midEntry)
-{
+       struct mid_q_entry *midEntry =
+                       container_of(refcount, struct mid_q_entry, refcount);
 #ifdef CONFIG_CIFS_STATS2
        __le16 command = midEntry->server->vals->lock_cmd;
        __u16 smb_cmd = le16_to_cpu(midEntry->command);
@@ -166,6 +152,19 @@ DeleteMidQEntry(struct mid_q_entry *midEntry)
                }
        }
 #endif
+
+       mempool_free(midEntry, cifs_mid_poolp);
+}
+
+void cifs_mid_q_entry_release(struct mid_q_entry *midEntry)
+{
+       spin_lock(&GlobalMid_Lock);
+       kref_put(&midEntry->refcount, _cifs_mid_q_entry_release);
+       spin_unlock(&GlobalMid_Lock);
+}
+
+void DeleteMidQEntry(struct mid_q_entry *midEntry)
+{
        cifs_mid_q_entry_release(midEntry);
 }
 
@@ -173,8 +172,10 @@ void
 cifs_delete_mid(struct mid_q_entry *mid)
 {
        spin_lock(&GlobalMid_Lock);
-       list_del_init(&mid->qhead);
-       mid->mid_flags |= MID_DELETED;
+       if (!(mid->mid_flags & MID_DELETED)) {
+               list_del_init(&mid->qhead);
+               mid->mid_flags |= MID_DELETED;
+       }
        spin_unlock(&GlobalMid_Lock);
 
        DeleteMidQEntry(mid);
@@ -872,7 +873,10 @@ cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
                rc = -EHOSTDOWN;
                break;
        default:
-               list_del_init(&mid->qhead);
+               if (!(mid->mid_flags & MID_DELETED)) {
+                       list_del_init(&mid->qhead);
+                       mid->mid_flags |= MID_DELETED;
+               }
                cifs_server_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
                         __func__, mid->mid, mid->mid_state);
                rc = -EIO;
index 6bf81f931de39e48bc2983a1c901b1a6643443f8..2cc43cd914eb8332e3c1ccbdcfafa2e6ce64baee 100644 (file)
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -220,10 +220,11 @@ static void *get_unlocked_entry(struct xa_state *xas, unsigned int order)
 
        for (;;) {
                entry = xas_find_conflict(xas);
+               if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
+                       return entry;
                if (dax_entry_order(entry) < order)
                        return XA_RETRY_ENTRY;
-               if (!entry || WARN_ON_ONCE(!xa_is_value(entry)) ||
-                               !dax_is_locked(entry))
+               if (!dax_is_locked(entry))
                        return entry;
 
                wq = dax_entry_waitqueue(xas, entry, &ewait.key);
index ae196784f487afa9a4117e0775d42559645d4b73..9329ced91f1d8563010dc45a9be430e62ba659d6 100644 (file)
@@ -241,9 +241,8 @@ void dio_warn_stale_pagecache(struct file *filp)
        }
 }
 
-/**
+/*
  * dio_complete() - called when all DIO BIO I/O has been completed
- * @offset: the byte offset in the file of the completed operation
  *
  * This drops i_dio_count, lets interested parties know that a DIO operation
  * has completed, and calculates the resulting return code for the operation.
index 8a9fcbd0e8ac197fbe22d04c23fc66bb9feff7b8..fc3a8d8064f84f8ee1ce24787c2632ca17c09650 100644 (file)
@@ -34,11 +34,15 @@ static void erofs_readendio(struct bio *bio)
 
 struct page *erofs_get_meta_page(struct super_block *sb, erofs_blk_t blkaddr)
 {
-       struct inode *const bd_inode = sb->s_bdev->bd_inode;
-       struct address_space *const mapping = bd_inode->i_mapping;
+       struct address_space *const mapping = sb->s_bdev->bd_inode->i_mapping;
+       struct page *page;
 
-       return read_cache_page_gfp(mapping, blkaddr,
+       page = read_cache_page_gfp(mapping, blkaddr,
                                   mapping_gfp_constraint(mapping, ~__GFP_FS));
+       /* should already be PageUptodate */
+       if (!IS_ERR(page))
+               lock_page(page);
+       return page;
 }
 
 static int erofs_map_blocks_flatmode(struct inode *inode,
index caf9a95173b0fb8b4bb6f2c648564e8052b9ff5f..0e369494f2f2c43601e9d8b97c285258db1877aa 100644 (file)
@@ -105,9 +105,9 @@ static int erofs_read_superblock(struct super_block *sb)
        int ret;
 
        page = read_mapping_page(sb->s_bdev->bd_inode->i_mapping, 0, NULL);
-       if (!page) {
+       if (IS_ERR(page)) {
                erofs_err(sb, "cannot read erofs superblock");
-               return -EIO;
+               return PTR_ERR(page);
        }
 
        sbi = EROFS_SB(sb);
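
Both erofs hunks hinge on the same convention: page-cache readers report failure with an ERR_PTR()-encoded pointer, never NULL, so a NULL check silently ignores I/O errors. A minimal userspace re-implementation of that convention, under the usual assumption that the top 4095 addresses are never valid pointers (fake_read_page stands in for read_mapping_page()):

    #include <stdio.h>

    #define MAX_ERRNO 4095

    static inline void *ERR_PTR(long error) { return (void *)error; }
    static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
    static inline int IS_ERR(const void *ptr)
    {
            return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    static void *fake_read_page(int fail)
    {
            return fail ? ERR_PTR(-5) : (void *)0x1000;  /* -5 is EIO */
    }

    int main(void)
    {
            void *page = fake_read_page(1);

            if (IS_ERR(page)) {          /* a NULL check would miss this */
                    printf("read failed: %ld\n", PTR_ERR(page));
                    return 1;
            }
            return 0;
    }
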
index 96e34c90f81438fc20493f96dfda98414608e462..fad80c97d2476fbeaff3858e7a10c0b4c6e583c6 100644 (file)
@@ -575,7 +575,7 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
        struct erofs_map_blocks *const map = &fe->map;
        struct z_erofs_collector *const clt = &fe->clt;
        const loff_t offset = page_offset(page);
-       bool tight = (clt->mode >= COLLECT_PRIMARY_HOOKED);
+       bool tight = true;
 
        enum z_erofs_cache_alloctype cache_strategy;
        enum z_erofs_page_type page_type;
@@ -628,8 +628,16 @@ restart_now:
        preload_compressed_pages(clt, MNGD_MAPPING(sbi),
                                 cache_strategy, pagepool);
 
-       tight &= (clt->mode >= COLLECT_PRIMARY_HOOKED);
 hitted:
+       /*
+        * Ensure the current partial page belongs to this submit chain rather
+        * than other concurrent submit chains or the noio(bypass) chain since
+        * those chains are handled asynchronously thus the page cannot be used
+        * for inplace I/O or pagevec (should be processed in strict order.)
+        */
+       tight &= (clt->mode >= COLLECT_PRIMARY_HOOKED &&
+                 clt->mode != COLLECT_PRIMARY_FOLLOWED_NOINPLACE);
+
        cur = end - min_t(unsigned int, offset + end - map->m_la, end);
        if (!(map->m_flags & EROFS_MAP_MAPPED)) {
                zero_user_segment(page, cur, end);
index 8aaa7eec7b74a24018795aae91e29781bdbc89ac..8461a63220398024c2aef5d4795d2f2aadd4ebfa 100644 (file)
@@ -164,8 +164,13 @@ static void finish_writeback_work(struct bdi_writeback *wb,
 
        if (work->auto_free)
                kfree(work);
-       if (done && atomic_dec_and_test(&done->cnt))
-               wake_up_all(done->waitq);
+       if (done) {
+               wait_queue_head_t *waitq = done->waitq;
+
+               /* @done can't be accessed after the following dec */
+               if (atomic_dec_and_test(&done->cnt))
+                       wake_up_all(waitq);
+       }
 }
 
 static void wb_queue_work(struct bdi_writeback *wb,
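
The writeback hunk above exists because the waiter may free 'done' the instant the count reaches zero, so the wake-up target must be copied out before the decrement. A single-threaded sketch of the fixed ordering (completion_like and the plain int standing in for the wait queue are illustrative, not the kernel types):

    #include <stdatomic.h>
    #include <stdio.h>

    struct completion_like {
            atomic_int cnt;
            int *waitq;             /* stand-in for the wait queue head */
    };

    static void finish_work(struct completion_like *done)
    {
            if (done) {
                    int *waitq = done->waitq;       /* copy before the dec */

                    /* once this hits zero, 'done' may already be freed */
                    if (atomic_fetch_sub(&done->cnt, 1) == 1)
                            *waitq = 1;             /* "wake_up_all(waitq)" */
            }
    }

    int main(void)
    {
            int woken = 0;
            struct completion_like done;

            atomic_init(&done.cnt, 1);
            done.waitq = &woken;
            finish_work(&done);
            printf("woken=%d\n", woken);
            return 0;
    }
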
@@ -900,7 +905,7 @@ restart:
  * cgroup_writeback_by_id - initiate cgroup writeback from bdi and memcg IDs
  * @bdi_id: target bdi id
  * @memcg_id: target memcg css id
- * @nr_pages: number of pages to write, 0 for best-effort dirty flushing
+ * @nr: number of pages to write, 0 for best-effort dirty flushing
  * @reason: reason why some writeback work initiated
  * @done: target wb_completion
  *
index 6419a2b3510deada202d4ccd9a4ca3447f8baa07..3e8cebfb59b7b38155f89196d7b2f2b8e764810b 100644 (file)
@@ -5,6 +5,7 @@
 
 obj-$(CONFIG_FUSE_FS) += fuse.o
 obj-$(CONFIG_CUSE) += cuse.o
-obj-$(CONFIG_VIRTIO_FS) += virtio_fs.o
+obj-$(CONFIG_VIRTIO_FS) += virtiofs.o
 
 fuse-objs := dev.o dir.o file.o inode.o control.o xattr.o acl.o readdir.o
+virtiofs-y += virtio_fs.o
index dadd617d826c93825500679f33a524940c15c2e7..ed1abc9e33cfcc165442c11b435f36a294a892a2 100644 (file)
@@ -276,10 +276,12 @@ static void flush_bg_queue(struct fuse_conn *fc)
 void fuse_request_end(struct fuse_conn *fc, struct fuse_req *req)
 {
        struct fuse_iqueue *fiq = &fc->iq;
-       bool async = req->args->end;
+       bool async;
 
        if (test_and_set_bit(FR_FINISHED, &req->flags))
                goto put_request;
+
+       async = req->args->end;
        /*
         * test_and_set_bit() implies smp_mb() between bit
         * changing and below intr_entry check. Pairs with
index d572c900bb0f208c6a49a43b97ece1cb882687f7..54d638f9ba1caea3cbd8c539b73204cd60ed19ba 100644 (file)
@@ -405,7 +405,8 @@ static struct dentry *fuse_lookup(struct inode *dir, struct dentry *entry,
        else
                fuse_invalidate_entry_cache(entry);
 
-       fuse_advise_use_readdirplus(dir);
+       if (inode)
+               fuse_advise_use_readdirplus(dir);
        return newent;
 
  out_iput:
@@ -1521,6 +1522,19 @@ int fuse_do_setattr(struct dentry *dentry, struct iattr *attr,
                is_truncate = true;
        }
 
+       /* Flush dirty data/metadata before non-truncate SETATTR */
+       if (is_wb && S_ISREG(inode->i_mode) &&
+           attr->ia_valid &
+                       (ATTR_MODE | ATTR_UID | ATTR_GID | ATTR_MTIME_SET |
+                        ATTR_TIMES_SET)) {
+               err = write_inode_now(inode, true);
+               if (err)
+                       return err;
+
+               fuse_set_nowrite(inode);
+               fuse_release_nowrite(inode);
+       }
+
        if (is_truncate) {
                fuse_set_nowrite(inode);
                set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
index 0f0225686aeeb2acfbeff178bfd2434e4ea80795..db48a5cf862095160179b7d205c7c8cd8c046c24 100644 (file)
@@ -217,7 +217,7 @@ int fuse_open_common(struct inode *inode, struct file *file, bool isdir)
 {
        struct fuse_conn *fc = get_fuse_conn(inode);
        int err;
-       bool lock_inode = (file->f_flags & O_TRUNC) &&
+       bool is_wb_truncate = (file->f_flags & O_TRUNC) &&
                          fc->atomic_o_trunc &&
                          fc->writeback_cache;
 
@@ -225,16 +225,20 @@ int fuse_open_common(struct inode *inode, struct file *file, bool isdir)
        if (err)
                return err;
 
-       if (lock_inode)
+       if (is_wb_truncate) {
                inode_lock(inode);
+               fuse_set_nowrite(inode);
+       }
 
        err = fuse_do_open(fc, get_node_id(inode), file, isdir);
 
        if (!err)
                fuse_finish_open(inode, file);
 
-       if (lock_inode)
+       if (is_wb_truncate) {
+               fuse_release_nowrite(inode);
                inode_unlock(inode);
+       }
 
        return err;
 }
@@ -1997,7 +2001,7 @@ static int fuse_writepages_fill(struct page *page,
 
        if (!data->ff) {
                err = -EIO;
-               data->ff = fuse_write_file_get(fc, get_fuse_inode(inode));
+               data->ff = fuse_write_file_get(fc, fi);
                if (!data->ff)
                        goto out_unlock;
        }
@@ -2042,8 +2046,6 @@ static int fuse_writepages_fill(struct page *page,
         * under writeback, so we can release the page lock.
         */
        if (data->wpa == NULL) {
-               struct fuse_inode *fi = get_fuse_inode(inode);
-
                err = -ENOMEM;
                wpa = fuse_writepage_args_alloc();
                if (!wpa) {
index 956aeaf961ae12b8c52d55fb961f3c18af66293e..d148188cfca453499d46eb6eaa9d77b645474a5a 100644 (file)
@@ -479,6 +479,7 @@ struct fuse_fs_context {
        bool destroy:1;
        bool no_control:1;
        bool no_force_umount:1;
+       bool no_mount_options:1;
        unsigned int max_read;
        unsigned int blksize;
        const char *subtype;
@@ -713,6 +714,9 @@ struct fuse_conn {
        /** Do not allow MNT_FORCE umount */
        unsigned int no_force_umount:1;
 
+       /* Do not show mount options */
+       unsigned int no_mount_options:1;
+
        /** The number of requests waiting for completion */
        atomic_t num_waiting;
 
index e040e2a2b62102bdcfbae5b5cf92a1a60996c58d..16aec32f7f3d71767746a886a95e8dfd151d24de 100644 (file)
@@ -558,6 +558,9 @@ static int fuse_show_options(struct seq_file *m, struct dentry *root)
        struct super_block *sb = root->d_sb;
        struct fuse_conn *fc = get_fuse_conn_super(sb);
 
+       if (fc->no_mount_options)
+               return 0;
+
        seq_printf(m, ",user_id=%u", from_kuid_munged(fc->user_ns, fc->user_id));
        seq_printf(m, ",group_id=%u", from_kgid_munged(fc->user_ns, fc->group_id));
        if (fc->default_permissions)
@@ -1180,6 +1183,7 @@ int fuse_fill_super_common(struct super_block *sb, struct fuse_fs_context *ctx)
        fc->destroy = ctx->destroy;
        fc->no_control = ctx->no_control;
        fc->no_force_umount = ctx->no_force_umount;
+       fc->no_mount_options = ctx->no_mount_options;
 
        err = -ENOMEM;
        root = fuse_get_root_inode(sb, ctx->rootmode);
index 6af3f131e468ad3ee7bedffe41f4bb0b6b03e91c..a5c86048b96edb59f85986935206f2c53972a170 100644 (file)
@@ -30,6 +30,7 @@ struct virtio_fs_vq {
        struct virtqueue *vq;     /* protected by ->lock */
        struct work_struct done_work;
        struct list_head queued_reqs;
+       struct list_head end_reqs;      /* End these requests */
        struct delayed_work dispatch_work;
        struct fuse_dev *fud;
        bool connected;
@@ -54,6 +55,9 @@ struct virtio_fs_forget {
        struct list_head list;
 };
 
+static int virtio_fs_enqueue_req(struct virtio_fs_vq *fsvq,
+                                struct fuse_req *req, bool in_flight);
+
 static inline struct virtio_fs_vq *vq_to_fsvq(struct virtqueue *vq)
 {
        struct virtio_fs *fs = vq->vdev->priv;
@@ -66,6 +70,19 @@ static inline struct fuse_pqueue *vq_to_fpq(struct virtqueue *vq)
        return &vq_to_fsvq(vq)->fud->pq;
 }
 
+/* Should be called with fsvq->lock held. */
+static inline void inc_in_flight_req(struct virtio_fs_vq *fsvq)
+{
+       fsvq->in_flight++;
+}
+
+/* Should be called with fsvq->lock held. */
+static inline void dec_in_flight_req(struct virtio_fs_vq *fsvq)
+{
+       WARN_ON(fsvq->in_flight <= 0);
+       fsvq->in_flight--;
+}
+
 static void release_virtio_fs_obj(struct kref *ref)
 {
        struct virtio_fs *vfs = container_of(ref, struct virtio_fs, refcount);
@@ -109,22 +126,6 @@ static void virtio_fs_drain_queue(struct virtio_fs_vq *fsvq)
        flush_delayed_work(&fsvq->dispatch_work);
 }
 
-static inline void drain_hiprio_queued_reqs(struct virtio_fs_vq *fsvq)
-{
-       struct virtio_fs_forget *forget;
-
-       spin_lock(&fsvq->lock);
-       while (1) {
-               forget = list_first_entry_or_null(&fsvq->queued_reqs,
-                                               struct virtio_fs_forget, list);
-               if (!forget)
-                       break;
-               list_del(&forget->list);
-               kfree(forget);
-       }
-       spin_unlock(&fsvq->lock);
-}
-
 static void virtio_fs_drain_all_queues(struct virtio_fs *fs)
 {
        struct virtio_fs_vq *fsvq;
@@ -132,9 +133,6 @@ static void virtio_fs_drain_all_queues(struct virtio_fs *fs)
 
        for (i = 0; i < fs->nvqs; i++) {
                fsvq = &fs->vqs[i];
-               if (i == VQ_HIPRIO)
-                       drain_hiprio_queued_reqs(fsvq);
-
                virtio_fs_drain_queue(fsvq);
        }
 }
@@ -253,14 +251,66 @@ static void virtio_fs_hiprio_done_work(struct work_struct *work)
 
                while ((req = virtqueue_get_buf(vq, &len)) != NULL) {
                        kfree(req);
-                       fsvq->in_flight--;
+                       dec_in_flight_req(fsvq);
                }
        } while (!virtqueue_enable_cb(vq) && likely(!virtqueue_is_broken(vq)));
        spin_unlock(&fsvq->lock);
 }
 
-static void virtio_fs_dummy_dispatch_work(struct work_struct *work)
+static void virtio_fs_request_dispatch_work(struct work_struct *work)
 {
+       struct fuse_req *req;
+       struct virtio_fs_vq *fsvq = container_of(work, struct virtio_fs_vq,
+                                                dispatch_work.work);
+       struct fuse_conn *fc = fsvq->fud->fc;
+       int ret;
+
+       pr_debug("virtio-fs: worker %s called.\n", __func__);
+       while (1) {
+               spin_lock(&fsvq->lock);
+               req = list_first_entry_or_null(&fsvq->end_reqs, struct fuse_req,
+                                              list);
+               if (!req) {
+                       spin_unlock(&fsvq->lock);
+                       break;
+               }
+
+               list_del_init(&req->list);
+               spin_unlock(&fsvq->lock);
+               fuse_request_end(fc, req);
+       }
+
+       /* Dispatch pending requests */
+       while (1) {
+               spin_lock(&fsvq->lock);
+               req = list_first_entry_or_null(&fsvq->queued_reqs,
+                                              struct fuse_req, list);
+               if (!req) {
+                       spin_unlock(&fsvq->lock);
+                       return;
+               }
+               list_del_init(&req->list);
+               spin_unlock(&fsvq->lock);
+
+               ret = virtio_fs_enqueue_req(fsvq, req, true);
+               if (ret < 0) {
+                       if (ret == -ENOMEM || ret == -ENOSPC) {
+                               spin_lock(&fsvq->lock);
+                               list_add_tail(&req->list, &fsvq->queued_reqs);
+                               schedule_delayed_work(&fsvq->dispatch_work,
+                                                     msecs_to_jiffies(1));
+                               spin_unlock(&fsvq->lock);
+                               return;
+                       }
+                       req->out.h.error = ret;
+                       spin_lock(&fsvq->lock);
+                       dec_in_flight_req(fsvq);
+                       spin_unlock(&fsvq->lock);
+                       pr_err("virtio-fs: virtio_fs_enqueue_req() failed %d\n",
+                              ret);
+                       fuse_request_end(fc, req);
+               }
+       }
 }
 
 static void virtio_fs_hiprio_dispatch_work(struct work_struct *work)
@@ -286,6 +336,7 @@ static void virtio_fs_hiprio_dispatch_work(struct work_struct *work)
 
                list_del(&forget->list);
                if (!fsvq->connected) {
+                       dec_in_flight_req(fsvq);
                        spin_unlock(&fsvq->lock);
                        kfree(forget);
                        continue;
@@ -307,13 +358,13 @@ static void virtio_fs_hiprio_dispatch_work(struct work_struct *work)
                        } else {
                                pr_debug("virtio-fs: Could not queue FORGET: err=%d. Dropping it.\n",
                                         ret);
+                               dec_in_flight_req(fsvq);
                                kfree(forget);
                        }
                        spin_unlock(&fsvq->lock);
                        return;
                }
 
-               fsvq->in_flight++;
                notify = virtqueue_kick_prepare(vq);
                spin_unlock(&fsvq->lock);
 
@@ -452,7 +503,7 @@ static void virtio_fs_requests_done_work(struct work_struct *work)
 
                fuse_request_end(fc, req);
                spin_lock(&fsvq->lock);
-               fsvq->in_flight--;
+               dec_in_flight_req(fsvq);
                spin_unlock(&fsvq->lock);
        }
 }
@@ -502,6 +553,7 @@ static int virtio_fs_setup_vqs(struct virtio_device *vdev,
        names[VQ_HIPRIO] = fs->vqs[VQ_HIPRIO].name;
        INIT_WORK(&fs->vqs[VQ_HIPRIO].done_work, virtio_fs_hiprio_done_work);
        INIT_LIST_HEAD(&fs->vqs[VQ_HIPRIO].queued_reqs);
+       INIT_LIST_HEAD(&fs->vqs[VQ_HIPRIO].end_reqs);
        INIT_DELAYED_WORK(&fs->vqs[VQ_HIPRIO].dispatch_work,
                        virtio_fs_hiprio_dispatch_work);
        spin_lock_init(&fs->vqs[VQ_HIPRIO].lock);
@@ -511,8 +563,9 @@ static int virtio_fs_setup_vqs(struct virtio_device *vdev,
                spin_lock_init(&fs->vqs[i].lock);
                INIT_WORK(&fs->vqs[i].done_work, virtio_fs_requests_done_work);
                INIT_DELAYED_WORK(&fs->vqs[i].dispatch_work,
-                                       virtio_fs_dummy_dispatch_work);
+                                 virtio_fs_request_dispatch_work);
                INIT_LIST_HEAD(&fs->vqs[i].queued_reqs);
+               INIT_LIST_HEAD(&fs->vqs[i].end_reqs);
                snprintf(fs->vqs[i].name, sizeof(fs->vqs[i].name),
                         "requests.%u", i - VQ_REQUEST);
                callbacks[i] = virtio_fs_vq_done;
@@ -708,6 +761,7 @@ __releases(fiq->lock)
                        list_add_tail(&forget->list, &fsvq->queued_reqs);
                        schedule_delayed_work(&fsvq->dispatch_work,
                                        msecs_to_jiffies(1));
+                       inc_in_flight_req(fsvq);
                } else {
                        pr_debug("virtio-fs: Could not queue FORGET: err=%d. Dropping it.\n",
                                 ret);
@@ -717,7 +771,7 @@ __releases(fiq->lock)
                goto out;
        }
 
-       fsvq->in_flight++;
+       inc_in_flight_req(fsvq);
        notify = virtqueue_kick_prepare(vq);
 
        spin_unlock(&fsvq->lock);
@@ -819,7 +873,7 @@ static unsigned int sg_init_fuse_args(struct scatterlist *sg,
 
 /* Add a request to a virtqueue and kick the device */
 static int virtio_fs_enqueue_req(struct virtio_fs_vq *fsvq,
-                                struct fuse_req *req)
+                                struct fuse_req *req, bool in_flight)
 {
        /* requests need at least 4 elements */
        struct scatterlist *stack_sgs[6];
@@ -835,6 +889,7 @@ static int virtio_fs_enqueue_req(struct virtio_fs_vq *fsvq,
        unsigned int i;
        int ret;
        bool notify;
+       struct fuse_pqueue *fpq;
 
        /* Does the sglist fit on the stack? */
        total_sgs = sg_count_fuse_req(req);
@@ -889,7 +944,17 @@ static int virtio_fs_enqueue_req(struct virtio_fs_vq *fsvq,
                goto out;
        }
 
-       fsvq->in_flight++;
+       /* Request successfully sent. */
+       fpq = &fsvq->fud->pq;
+       spin_lock(&fpq->lock);
+       list_add_tail(&req->list, fpq->processing);
+       spin_unlock(&fpq->lock);
+       set_bit(FR_SENT, &req->flags);
+       /* matches barrier in request_wait_answer() */
+       smp_mb__after_atomic();
+
+       if (!in_flight)
+               inc_in_flight_req(fsvq);
        notify = virtqueue_kick_prepare(vq);
 
        spin_unlock(&fsvq->lock);
@@ -915,9 +980,8 @@ __releases(fiq->lock)
 {
        unsigned int queue_id = VQ_REQUEST; /* TODO multiqueue */
        struct virtio_fs *fs;
-       struct fuse_conn *fc;
        struct fuse_req *req;
-       struct fuse_pqueue *fpq;
+       struct virtio_fs_vq *fsvq;
        int ret;
 
        WARN_ON(list_empty(&fiq->pending));
@@ -928,44 +992,36 @@ __releases(fiq->lock)
        spin_unlock(&fiq->lock);
 
        fs = fiq->priv;
-       fc = fs->vqs[queue_id].fud->fc;
 
        pr_debug("%s: opcode %u unique %#llx nodeid %#llx in.len %u out.len %u\n",
                  __func__, req->in.h.opcode, req->in.h.unique,
                 req->in.h.nodeid, req->in.h.len,
                 fuse_len_args(req->args->out_numargs, req->args->out_args));
 
-       fpq = &fs->vqs[queue_id].fud->pq;
-       spin_lock(&fpq->lock);
-       if (!fpq->connected) {
-               spin_unlock(&fpq->lock);
-               req->out.h.error = -ENODEV;
-               pr_err("virtio-fs: %s disconnected\n", __func__);
-               fuse_request_end(fc, req);
-               return;
-       }
-       list_add_tail(&req->list, fpq->processing);
-       spin_unlock(&fpq->lock);
-       set_bit(FR_SENT, &req->flags);
-       /* matches barrier in request_wait_answer() */
-       smp_mb__after_atomic();
-
-retry:
-       ret = virtio_fs_enqueue_req(&fs->vqs[queue_id], req);
+       fsvq = &fs->vqs[queue_id];
+       ret = virtio_fs_enqueue_req(fsvq, req, false);
        if (ret < 0) {
                if (ret == -ENOMEM || ret == -ENOSPC) {
-                       /* Virtqueue full. Retry submission */
-                       /* TODO use completion instead of timeout */
-                       usleep_range(20, 30);
-                       goto retry;
+                       /*
+                        * Virtqueue full. Retry submission from worker
+                        * context as we might be holding fc->bg_lock.
+                        */
+                       spin_lock(&fsvq->lock);
+                       list_add_tail(&req->list, &fsvq->queued_reqs);
+                       inc_in_flight_req(fsvq);
+                       schedule_delayed_work(&fsvq->dispatch_work,
+                                               msecs_to_jiffies(1));
+                       spin_unlock(&fsvq->lock);
+                       return;
                }
                req->out.h.error = ret;
                pr_err("virtio-fs: virtio_fs_enqueue_req() failed %d\n", ret);
-               spin_lock(&fpq->lock);
-               clear_bit(FR_SENT, &req->flags);
-               list_del_init(&req->list);
-               spin_unlock(&fpq->lock);
-               fuse_request_end(fc, req);
+
+               /* Can't end request in submission context. Use a worker */
+               spin_lock(&fsvq->lock);
+               list_add_tail(&req->list, &fsvq->end_reqs);
+               schedule_delayed_work(&fsvq->dispatch_work, 0);
+               spin_unlock(&fsvq->lock);
                return;
        }
 }
@@ -992,6 +1048,7 @@ static int virtio_fs_fill_super(struct super_block *sb)
                .destroy = true,
                .no_control = true,
                .no_force_umount = true,
+               .no_mount_options = true,
        };
 
        mutex_lock(&virtio_fs_mutex);
index 681b44682b0db6a6016303ec515c291720e6ff32..18daf494abab9e2492ab12272358d151e5c5538a 100644 (file)
@@ -1540,17 +1540,23 @@ static int gfs2_init_fs_context(struct fs_context *fc)
 {
        struct gfs2_args *args;
 
-       args = kzalloc(sizeof(*args), GFP_KERNEL);
+       args = kmalloc(sizeof(*args), GFP_KERNEL);
        if (args == NULL)
                return -ENOMEM;
 
-       args->ar_quota = GFS2_QUOTA_DEFAULT;
-       args->ar_data = GFS2_DATA_DEFAULT;
-       args->ar_commit = 30;
-       args->ar_statfs_quantum = 30;
-       args->ar_quota_quantum = 60;
-       args->ar_errors = GFS2_ERRORS_DEFAULT;
+       if (fc->purpose == FS_CONTEXT_FOR_RECONFIGURE) {
+               struct gfs2_sbd *sdp = fc->root->d_sb->s_fs_info;
 
+               *args = sdp->sd_args;
+       } else {
+               memset(args, 0, sizeof(*args));
+               args->ar_quota = GFS2_QUOTA_DEFAULT;
+               args->ar_data = GFS2_DATA_DEFAULT;
+               args->ar_commit = 30;
+               args->ar_statfs_quantum = 30;
+               args->ar_quota_quantum = 60;
+               args->ar_errors = GFS2_ERRORS_DEFAULT;
+       }
        fc->fs_private = args;
        fc->ops = &gfs2_context_ops;
        return 0;
@@ -1600,6 +1606,7 @@ static int gfs2_meta_get_tree(struct fs_context *fc)
 }
 
 static const struct fs_context_operations gfs2_meta_context_ops = {
+       .free        = gfs2_fc_free,
        .get_tree    = gfs2_meta_get_tree,
 };
 
index aa8ac557493cb5e20f1ac78a87bb6b8e6e6f6fe8..f9a38998f2fc515c94c4cc292e00450a55aaa912 100644 (file)
@@ -197,6 +197,7 @@ struct io_ring_ctx {
                unsigned                sq_entries;
                unsigned                sq_mask;
                unsigned                sq_thread_idle;
+               unsigned                cached_sq_dropped;
                struct io_uring_sqe     *sq_sqes;
 
                struct list_head        defer_list;
@@ -212,6 +213,7 @@ struct io_ring_ctx {
 
        struct {
                unsigned                cached_cq_tail;
+               atomic_t                cached_cq_overflow;
                unsigned                cq_entries;
                unsigned                cq_mask;
                struct wait_queue_head  cq_wait;
@@ -322,6 +324,8 @@ struct io_kiocb {
 #define REQ_F_FAIL_LINK                256     /* fail rest of links */
 #define REQ_F_SHADOW_DRAIN     512     /* link-drain shadow req */
 #define REQ_F_TIMEOUT          1024    /* timeout request */
+#define REQ_F_ISREG            2048    /* regular file */
+#define REQ_F_MUST_PUNT                4096    /* must be punted even for NONBLOCK */
        u64                     user_data;
        u32                     result;
        u32                     sequence;
@@ -415,27 +419,28 @@ static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
        return ctx;
 }
 
+static inline bool __io_sequence_defer(struct io_ring_ctx *ctx,
+                                      struct io_kiocb *req)
+{
+       return req->sequence != ctx->cached_cq_tail + ctx->cached_sq_dropped
+                                       + atomic_read(&ctx->cached_cq_overflow);
+}
+
 static inline bool io_sequence_defer(struct io_ring_ctx *ctx,
                                     struct io_kiocb *req)
 {
-       /* timeout requests always honor sequence */
-       if (!(req->flags & REQ_F_TIMEOUT) &&
-           (req->flags & (REQ_F_IO_DRAIN|REQ_F_IO_DRAINED)) != REQ_F_IO_DRAIN)
+       if ((req->flags & (REQ_F_IO_DRAIN|REQ_F_IO_DRAINED)) != REQ_F_IO_DRAIN)
                return false;
 
-       return req->sequence != ctx->cached_cq_tail + ctx->rings->sq_dropped;
+       return __io_sequence_defer(ctx, req);
 }
 
-static struct io_kiocb *__io_get_deferred_req(struct io_ring_ctx *ctx,
-                                             struct list_head *list)
+static struct io_kiocb *io_get_deferred_req(struct io_ring_ctx *ctx)
 {
        struct io_kiocb *req;
 
-       if (list_empty(list))
-               return NULL;
-
-       req = list_first_entry(list, struct io_kiocb, list);
-       if (!io_sequence_defer(ctx, req)) {
+       req = list_first_entry_or_null(&ctx->defer_list, struct io_kiocb, list);
+       if (req && !io_sequence_defer(ctx, req)) {
                list_del_init(&req->list);
                return req;
        }
@@ -443,14 +448,17 @@ static struct io_kiocb *__io_get_deferred_req(struct io_ring_ctx *ctx,
        return NULL;
 }
 
-static struct io_kiocb *io_get_deferred_req(struct io_ring_ctx *ctx)
-{
-       return __io_get_deferred_req(ctx, &ctx->defer_list);
-}
-
 static struct io_kiocb *io_get_timeout_req(struct io_ring_ctx *ctx)
 {
-       return __io_get_deferred_req(ctx, &ctx->timeout_list);
+       struct io_kiocb *req;
+
+       req = list_first_entry_or_null(&ctx->timeout_list, struct io_kiocb, list);
+       if (req && !__io_sequence_defer(ctx, req)) {
+               list_del_init(&req->list);
+               return req;
+       }
+
+       return NULL;
 }
 
 static void __io_commit_cqring(struct io_ring_ctx *ctx)
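
The defer test above only ever checks equality between a request's submission-time sequence and the sum of three u32 counters (CQEs posted, SQEs dropped, CQEs that overflowed), so wraparound of any counter is harmless; the kernel-private cached copies also mean the sum cannot be skewed by userspace writes to the shared ring. A minimal userspace sketch of that arithmetic, with simplified stand-in names rather than the real io_ring_ctx fields:

/*
 * Standalone sketch (not kernel code) of the drain/defer test above: a
 * drained request may only run once everything submitted before it has been
 * accounted for, where "accounted for" is the sum of posted CQEs, dropped
 * SQEs and overflowed CQEs.  All counters are u32, so the equality test
 * keeps working across wraparound.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct ring_state {
	uint32_t cached_cq_tail;     /* CQEs posted */
	uint32_t cached_sq_dropped;  /* malformed SQEs skipped */
	uint32_t cached_cq_overflow; /* completions that missed the CQ ring */
};

static int sequence_defer(const struct ring_state *r, uint32_t req_sequence)
{
	/* defer while earlier submissions are still outstanding */
	return req_sequence != r->cached_cq_tail + r->cached_sq_dropped +
				r->cached_cq_overflow;
}

int main(void)
{
	/* counters close to UINT32_MAX to show that wraparound is harmless */
	struct ring_state r = {
		.cached_cq_tail = UINT32_MAX - 2,
		.cached_sq_dropped = 1,
		.cached_cq_overflow = 0,
	};
	uint32_t seq = 1;	/* target reached only after the counters wrap */

	assert(sequence_defer(&r, seq));	/* earlier work still outstanding */
	r.cached_cq_tail += 3;			/* completions arrive, tail wraps past 0 */
	assert(!sequence_defer(&r, seq));	/* sums match, the req may run now */
	printf("defer check behaves as expected across wraparound\n");
	return 0;
}
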
@@ -562,9 +570,8 @@ static void io_cqring_fill_event(struct io_ring_ctx *ctx, u64 ki_user_data,
                WRITE_ONCE(cqe->res, res);
                WRITE_ONCE(cqe->flags, 0);
        } else {
-               unsigned overflow = READ_ONCE(ctx->rings->cq_overflow);
-
-               WRITE_ONCE(ctx->rings->cq_overflow, overflow + 1);
+               WRITE_ONCE(ctx->rings->cq_overflow,
+                               atomic_inc_return(&ctx->cached_cq_overflow));
        }
 }
 
@@ -591,14 +598,6 @@ static void io_cqring_add_event(struct io_ring_ctx *ctx, u64 user_data,
        io_cqring_ev_posted(ctx);
 }
 
-static void io_ring_drop_ctx_refs(struct io_ring_ctx *ctx, unsigned refs)
-{
-       percpu_ref_put_many(&ctx->refs, refs);
-
-       if (waitqueue_active(&ctx->wait))
-               wake_up(&ctx->wait);
-}
-
 static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx,
                                   struct io_submit_state *state)
 {
@@ -646,7 +645,7 @@ static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx,
        req->result = 0;
        return req;
 out:
-       io_ring_drop_ctx_refs(ctx, 1);
+       percpu_ref_put(&ctx->refs);
        return NULL;
 }
 
@@ -654,7 +653,7 @@ static void io_free_req_many(struct io_ring_ctx *ctx, void **reqs, int *nr)
 {
        if (*nr) {
                kmem_cache_free_bulk(req_cachep, *nr, reqs);
-               io_ring_drop_ctx_refs(ctx, *nr);
+               percpu_ref_put_many(&ctx->refs, *nr);
                *nr = 0;
        }
 }
@@ -663,7 +662,7 @@ static void __io_free_req(struct io_kiocb *req)
 {
        if (req->file && !(req->flags & REQ_F_FIXED_FILE))
                fput(req->file);
-       io_ring_drop_ctx_refs(req->ctx, 1);
+       percpu_ref_put(&req->ctx->refs);
        kmem_cache_free(req_cachep, req);
 }
 
@@ -738,6 +737,14 @@ static unsigned io_cqring_events(struct io_rings *rings)
        return READ_ONCE(rings->cq.tail) - READ_ONCE(rings->cq.head);
 }
 
+static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
+{
+       struct io_rings *rings = ctx->rings;
+
+       /* make sure SQ entry isn't read before tail */
+       return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
+}
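
io_sqring_entries() pairs an acquire load of the shared sq tail with the release-style publication done by the submitter, so every SQE slot below the observed tail is fully written before the kernel parses it. A standalone C11 sketch of the same pattern, with illustrative names rather than the io_uring structures:

/*
 * Standalone C11 sketch (not kernel code) of the acquire-load above: the
 * producer writes an entry and then does a release-store of the new tail;
 * the consumer acquire-loads the tail, so any entry at an index below the
 * observed tail is guaranteed visible.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define RING_ENTRIES 8u

struct sq_ring {
	_Atomic uint32_t tail;		/* written by the producer */
	uint32_t head;			/* consumed locally, like cached_sq_head */
	uint32_t entries[RING_ENTRIES];
};

static void sq_publish(struct sq_ring *r, uint32_t value)
{
	uint32_t tail = atomic_load_explicit(&r->tail, memory_order_relaxed);

	r->entries[tail & (RING_ENTRIES - 1)] = value;
	/* release: the entry write above must not be reordered past this */
	atomic_store_explicit(&r->tail, tail + 1, memory_order_release);
}

static uint32_t sq_available(struct sq_ring *r)
{
	/* acquire: pairs with the release store in sq_publish() */
	return atomic_load_explicit(&r->tail, memory_order_acquire) - r->head;
}

int main(void)
{
	struct sq_ring r = { 0 };

	sq_publish(&r, 42);
	sq_publish(&r, 43);
	printf("entries ready: %u\n", (unsigned)sq_available(&r));	/* 2 */
	return 0;
}
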
+
 /*
  * Find and free completed poll iocbs
  */
@@ -867,19 +874,11 @@ static void io_iopoll_reap_events(struct io_ring_ctx *ctx)
        mutex_unlock(&ctx->uring_lock);
 }
 
-static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
-                          long min)
+static int __io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
+                           long min)
 {
-       int iters, ret = 0;
+       int iters = 0, ret = 0;
 
-       /*
-        * We disallow the app entering submit/complete with polling, but we
-        * still need to lock the ring to prevent racing with polled issue
-        * that got punted to a workqueue.
-        */
-       mutex_lock(&ctx->uring_lock);
-
-       iters = 0;
        do {
                int tmin = 0;
 
@@ -915,30 +914,45 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
                ret = 0;
        } while (min && !*nr_events && !need_resched());
 
+       return ret;
+}
+
+static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
+                          long min)
+{
+       int ret;
+
+       /*
+        * We disallow the app entering submit/complete with polling, but we
+        * still need to lock the ring to prevent racing with polled issue
+        * that got punted to a workqueue.
+        */
+       mutex_lock(&ctx->uring_lock);
+       ret = __io_iopoll_check(ctx, nr_events, min);
        mutex_unlock(&ctx->uring_lock);
        return ret;
 }
 
-static void kiocb_end_write(struct kiocb *kiocb)
+static void kiocb_end_write(struct io_kiocb *req)
 {
-       if (kiocb->ki_flags & IOCB_WRITE) {
-               struct inode *inode = file_inode(kiocb->ki_filp);
+       /*
+        * Tell lockdep we inherited freeze protection from submission
+        * thread.
+        */
+       if (req->flags & REQ_F_ISREG) {
+               struct inode *inode = file_inode(req->file);
 
-               /*
-                * Tell lockdep we inherited freeze protection from submission
-                * thread.
-                */
-               if (S_ISREG(inode->i_mode))
-                       __sb_writers_acquired(inode->i_sb, SB_FREEZE_WRITE);
-               file_end_write(kiocb->ki_filp);
+               __sb_writers_acquired(inode->i_sb, SB_FREEZE_WRITE);
        }
+       file_end_write(req->file);
 }
 
 static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
 {
        struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw);
 
-       kiocb_end_write(kiocb);
+       if (kiocb->ki_flags & IOCB_WRITE)
+               kiocb_end_write(req);
 
        if ((req->flags & REQ_F_LINK) && res != req->result)
                req->flags |= REQ_F_FAIL_LINK;
@@ -950,7 +964,8 @@ static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
 {
        struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw);
 
-       kiocb_end_write(kiocb);
+       if (kiocb->ki_flags & IOCB_WRITE)
+               kiocb_end_write(req);
 
        if ((req->flags & REQ_F_LINK) && res != req->result)
                req->flags |= REQ_F_FAIL_LINK;
@@ -1064,8 +1079,17 @@ static int io_prep_rw(struct io_kiocb *req, const struct sqe_submit *s,
        if (!req->file)
                return -EBADF;
 
-       if (force_nonblock && !io_file_supports_async(req->file))
-               force_nonblock = false;
+       if (S_ISREG(file_inode(req->file)->i_mode))
+               req->flags |= REQ_F_ISREG;
+
+       /*
+        * If the file doesn't support async, mark it as REQ_F_MUST_PUNT so
+        * we know to async punt it even if it was opened O_NONBLOCK
+        */
+       if (force_nonblock && !io_file_supports_async(req->file)) {
+               req->flags |= REQ_F_MUST_PUNT;
+               return -EAGAIN;
+       }
 
        kiocb->ki_pos = READ_ONCE(sqe->off);
        kiocb->ki_flags = iocb_flags(kiocb->ki_filp);
@@ -1086,7 +1110,8 @@ static int io_prep_rw(struct io_kiocb *req, const struct sqe_submit *s,
                return ret;
 
        /* don't allow async punt if RWF_NOWAIT was requested */
-       if (kiocb->ki_flags & IOCB_NOWAIT)
+       if ((kiocb->ki_flags & IOCB_NOWAIT) ||
+           (req->file->f_flags & O_NONBLOCK))
                req->flags |= REQ_F_NOWAIT;
 
        if (force_nonblock)
@@ -1099,6 +1124,7 @@ static int io_prep_rw(struct io_kiocb *req, const struct sqe_submit *s,
 
                kiocb->ki_flags |= IOCB_HIPRI;
                kiocb->ki_complete = io_complete_rw_iopoll;
+               req->result = 0;
        } else {
                if (kiocb->ki_flags & IOCB_HIPRI)
                        return -EINVAL;
@@ -1387,7 +1413,9 @@ static int io_read(struct io_kiocb *req, const struct sqe_submit *s,
                 * need async punt anyway, so it's more efficient to do it
                 * here.
                 */
-               if (force_nonblock && ret2 > 0 && ret2 < read_size)
+               if (force_nonblock && !(req->flags & REQ_F_NOWAIT) &&
+                   (req->flags & REQ_F_ISREG) &&
+                   ret2 > 0 && ret2 < read_size)
                        ret2 = -EAGAIN;
                /* Catch -EAGAIN return for forced non-blocking submission */
                if (!force_nonblock || ret2 != -EAGAIN) {
@@ -1452,7 +1480,7 @@ static int io_write(struct io_kiocb *req, const struct sqe_submit *s,
                 * released so that it doesn't complain about the held lock when
                 * we return to userspace.
                 */
-               if (S_ISREG(file_inode(file)->i_mode)) {
+               if (req->flags & REQ_F_ISREG) {
                        __sb_start_write(file_inode(file)->i_sb,
                                                SB_FREEZE_WRITE, true);
                        __sb_writers_release(file_inode(file)->i_sb,
@@ -1867,7 +1895,7 @@ static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
 {
        struct io_ring_ctx *ctx;
-       struct io_kiocb *req;
+       struct io_kiocb *req, *prev;
        unsigned long flags;
 
        req = container_of(timer, struct io_kiocb, timeout.timer);
@@ -1875,6 +1903,15 @@ static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
        atomic_inc(&ctx->cq_timeouts);
 
        spin_lock_irqsave(&ctx->completion_lock, flags);
+       /*
+        * Adjust the sequence of the reqs before the current one because
+        * this req will consume a slot in the cq_ring and the cq_tail
+        * pointer will be advanced; otherwise other timeout reqs may
+        * complete early, without waiting for enough wait_nr completions.
+        */
+       prev = req;
+       list_for_each_entry_continue_reverse(prev, &ctx->timeout_list, list)
+               prev->sequence++;
        list_del(&req->list);
 
        io_cqring_fill_event(ctx, req->user_data, -ETIME);
@@ -1889,18 +1926,19 @@ static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
 
 static int io_timeout(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
-       unsigned count, req_dist, tail_index;
+       unsigned count;
        struct io_ring_ctx *ctx = req->ctx;
        struct list_head *entry;
-       struct timespec ts;
+       struct timespec64 ts;
+       unsigned span = 0;
 
        if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
                return -EINVAL;
        if (sqe->flags || sqe->ioprio || sqe->buf_index || sqe->timeout_flags ||
            sqe->len != 1)
                return -EINVAL;
-       if (copy_from_user(&ts, (void __user *) (unsigned long) sqe->addr,
-           sizeof(ts)))
+
+       if (get_timespec64(&ts, u64_to_user_ptr(sqe->addr)))
                return -EFAULT;
 
        /*
@@ -1912,29 +1950,52 @@ static int io_timeout(struct io_kiocb *req, const struct io_uring_sqe *sqe)
                count = 1;
 
        req->sequence = ctx->cached_sq_head + count - 1;
+       /* reuse it to store the count */
+       req->submit.sequence = count;
        req->flags |= REQ_F_TIMEOUT;
 
        /*
         * Insertion sort, ensuring the first entry in the list is always
         * the one we need first.
         */
-       tail_index = ctx->cached_cq_tail - ctx->rings->sq_dropped;
-       req_dist = req->sequence - tail_index;
        spin_lock_irq(&ctx->completion_lock);
        list_for_each_prev(entry, &ctx->timeout_list) {
                struct io_kiocb *nxt = list_entry(entry, struct io_kiocb, list);
-               unsigned dist;
+               unsigned nxt_sq_head;
+               long long tmp, tmp_nxt;
 
-               dist = nxt->sequence - tail_index;
-               if (req_dist >= dist)
+               /*
+                * Since cached_sq_head + count - 1 can overflow, use type long
+                * long to store it.
+                */
+               tmp = (long long)ctx->cached_sq_head + count - 1;
+               nxt_sq_head = nxt->sequence - nxt->submit.sequence + 1;
+               tmp_nxt = (long long)nxt_sq_head + nxt->submit.sequence - 1;
+
+               /*
+                * cached_sq_head may overflow, and it will never overflow twice
+                * as long as some timeout req is still valid.
+                */
+               if (ctx->cached_sq_head < nxt_sq_head)
+                       tmp += UINT_MAX;
+
+               if (tmp > tmp_nxt)
                        break;
+
+               /*
+                * The sequence of each req after the inserted one, and of the
+                * inserted req itself, is adjusted: every timeout consumes a slot.
+                */
+               span++;
+               nxt->sequence++;
        }
+       req->sequence -= span;
        list_add(&req->list, entry);
        spin_unlock_irq(&ctx->completion_lock);
 
        hrtimer_init(&req->timeout.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        req->timeout.timer.function = io_timeout_fn;
-       hrtimer_start(&req->timeout.timer, timespec_to_ktime(ts),
+       hrtimer_start(&req->timeout.timer, timespec64_to_ktime(ts),
                        HRTIMER_MODE_REL);
        return 0;
 }
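
The insertion sort above widens each timeout's completion target to long long and adds UINT_MAX when the current sq head has wrapped past the head recorded for an existing entry, so the ordering survives u32 overflow. A standalone sketch of that comparison (simplified names and values, not kernel code):

/*
 * Standalone sketch of the wraparound handling in io_timeout() above:
 * targets are widened to 64 bits, and UINT_MAX is added when the current
 * sq head has already wrapped past the head recorded for an existing
 * entry, so "later" timeouts still sort after "earlier" ones.
 */
#include <assert.h>
#include <limits.h>
#include <stdint.h>
#include <stdio.h>

/* 64-bit completion target for a timeout armed at sq head @head for @count */
static long long timeout_target(uint32_t head, uint32_t count)
{
	return (long long)head + count - 1;
}

int main(void)
{
	/* existing entry, armed shortly before the sq head wraps */
	uint32_t old_head = UINT32_MAX - 5, old_count = 3;
	/* new entry, armed just after the wrap */
	uint32_t new_head = 4, new_count = 3;

	uint32_t old_u32 = old_head + old_count - 1;	/* UINT32_MAX - 3 */
	uint32_t new_u32 = new_head + new_count - 1;	/* 6: looks "earlier" */
	assert(new_u32 < old_u32);			/* naive u32 order is wrong */

	long long old_tgt = timeout_target(old_head, old_count);
	long long new_tgt = timeout_target(new_head, new_count);
	if (new_head < old_head)	/* head wrapped since the old entry was armed */
		new_tgt += UINT_MAX;
	assert(new_tgt > old_tgt);	/* widened comparison restores the order */

	printf("old target %lld, adjusted new target %lld\n", old_tgt, new_tgt);
	return 0;
}
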
@@ -2267,12 +2328,18 @@ static int io_req_set_file(struct io_ring_ctx *ctx, const struct sqe_submit *s,
 }
 
 static int __io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
-                       struct sqe_submit *s, bool force_nonblock)
+                       struct sqe_submit *s)
 {
        int ret;
 
-       ret = __io_submit_sqe(ctx, req, s, force_nonblock);
-       if (ret == -EAGAIN && !(req->flags & REQ_F_NOWAIT)) {
+       ret = __io_submit_sqe(ctx, req, s, true);
+
+       /*
+        * We async punt it if the file wasn't marked NOWAIT, or if the file
+        * doesn't support non-blocking read/write attempts
+        */
+       if (ret == -EAGAIN && (!(req->flags & REQ_F_NOWAIT) ||
+           (req->flags & REQ_F_MUST_PUNT))) {
                struct io_uring_sqe *sqe_copy;
 
                sqe_copy = kmemdup(s->sqe, sizeof(*sqe_copy), GFP_KERNEL);
@@ -2312,7 +2379,7 @@ static int __io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
 }
 
 static int io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
-                       struct sqe_submit *s, bool force_nonblock)
+                       struct sqe_submit *s)
 {
        int ret;
 
@@ -2325,18 +2392,17 @@ static int io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
                return 0;
        }
 
-       return __io_queue_sqe(ctx, req, s, force_nonblock);
+       return __io_queue_sqe(ctx, req, s);
 }
 
 static int io_queue_link_head(struct io_ring_ctx *ctx, struct io_kiocb *req,
-                             struct sqe_submit *s, struct io_kiocb *shadow,
-                             bool force_nonblock)
+                             struct sqe_submit *s, struct io_kiocb *shadow)
 {
        int ret;
        int need_submit = false;
 
        if (!shadow)
-               return io_queue_sqe(ctx, req, s, force_nonblock);
+               return io_queue_sqe(ctx, req, s);
 
        /*
         * Mark the first IO in link list as DRAIN, let all the following
@@ -2348,6 +2414,7 @@ static int io_queue_link_head(struct io_ring_ctx *ctx, struct io_kiocb *req,
        if (ret) {
                if (ret != -EIOCBQUEUED) {
                        io_free_req(req);
+                       __io_free_req(shadow);
                        io_cqring_add_event(ctx, s->sqe->user_data, ret);
                        return 0;
                }
@@ -2365,7 +2432,7 @@ static int io_queue_link_head(struct io_ring_ctx *ctx, struct io_kiocb *req,
        spin_unlock_irq(&ctx->completion_lock);
 
        if (need_submit)
-               return __io_queue_sqe(ctx, req, s, force_nonblock);
+               return __io_queue_sqe(ctx, req, s);
 
        return 0;
 }
@@ -2373,8 +2440,7 @@ static int io_queue_link_head(struct io_ring_ctx *ctx, struct io_kiocb *req,
 #define SQE_VALID_FLAGS        (IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK)
 
 static void io_submit_sqe(struct io_ring_ctx *ctx, struct sqe_submit *s,
-                         struct io_submit_state *state, struct io_kiocb **link,
-                         bool force_nonblock)
+                         struct io_submit_state *state, struct io_kiocb **link)
 {
        struct io_uring_sqe *sqe_copy;
        struct io_kiocb *req;
@@ -2401,6 +2467,8 @@ err:
                return;
        }
 
+       req->user_data = s->sqe->user_data;
+
        /*
         * If we already have a head request, queue this one for async
         * submittal once the head completes. If we don't have a head but
@@ -2427,7 +2495,7 @@ err:
                INIT_LIST_HEAD(&req->link_list);
                *link = req;
        } else {
-               io_queue_sqe(ctx, req, s, force_nonblock);
+               io_queue_sqe(ctx, req, s);
        }
 }
 
@@ -2507,12 +2575,13 @@ static bool io_get_sqring(struct io_ring_ctx *ctx, struct sqe_submit *s)
 
        /* drop invalid entries */
        ctx->cached_sq_head++;
-       rings->sq_dropped++;
+       ctx->cached_sq_dropped++;
+       WRITE_ONCE(rings->sq_dropped, ctx->cached_sq_dropped);
        return false;
 }
 
-static int io_submit_sqes(struct io_ring_ctx *ctx, struct sqe_submit *sqes,
-                         unsigned int nr, bool has_user, bool mm_fault)
+static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
+                         bool has_user, bool mm_fault)
 {
        struct io_submit_state state, *statep = NULL;
        struct io_kiocb *link = NULL;
@@ -2526,19 +2595,23 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, struct sqe_submit *sqes,
        }
 
        for (i = 0; i < nr; i++) {
+               struct sqe_submit s;
+
+               if (!io_get_sqring(ctx, &s))
+                       break;
+
                /*
                 * If previous wasn't linked and we have a linked command,
                 * that's the end of the chain. Submit the previous link.
                 */
                if (!prev_was_link && link) {
-                       io_queue_link_head(ctx, link, &link->submit, shadow_req,
-                                               true);
+                       io_queue_link_head(ctx, link, &link->submit, shadow_req);
                        link = NULL;
                        shadow_req = NULL;
                }
-               prev_was_link = (sqes[i].sqe->flags & IOSQE_IO_LINK) != 0;
+               prev_was_link = (s.sqe->flags & IOSQE_IO_LINK) != 0;
 
-               if (link && (sqes[i].sqe->flags & IOSQE_IO_DRAIN)) {
+               if (link && (s.sqe->flags & IOSQE_IO_DRAIN)) {
                        if (!shadow_req) {
                                shadow_req = io_get_req(ctx, NULL);
                                if (unlikely(!shadow_req))
@@ -2546,24 +2619,24 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, struct sqe_submit *sqes,
                                shadow_req->flags |= (REQ_F_IO_DRAIN | REQ_F_SHADOW_DRAIN);
                                refcount_dec(&shadow_req->refs);
                        }
-                       shadow_req->sequence = sqes[i].sequence;
+                       shadow_req->sequence = s.sequence;
                }
 
 out:
                if (unlikely(mm_fault)) {
-                       io_cqring_add_event(ctx, sqes[i].sqe->user_data,
+                       io_cqring_add_event(ctx, s.sqe->user_data,
                                                -EFAULT);
                } else {
-                       sqes[i].has_user = has_user;
-                       sqes[i].needs_lock = true;
-                       sqes[i].needs_fixed_file = true;
-                       io_submit_sqe(ctx, &sqes[i], statep, &link, true);
+                       s.has_user = has_user;
+                       s.needs_lock = true;
+                       s.needs_fixed_file = true;
+                       io_submit_sqe(ctx, &s, statep, &link);
                        submitted++;
                }
        }
 
        if (link)
-               io_queue_link_head(ctx, link, &link->submit, shadow_req, true);
+               io_queue_link_head(ctx, link, &link->submit, shadow_req);
        if (statep)
                io_submit_state_end(&state);
 
@@ -2572,7 +2645,6 @@ out:
 
 static int io_sq_thread(void *data)
 {
-       struct sqe_submit sqes[IO_IOPOLL_BATCH];
        struct io_ring_ctx *ctx = data;
        struct mm_struct *cur_mm = NULL;
        mm_segment_t old_fs;
@@ -2587,14 +2659,27 @@ static int io_sq_thread(void *data)
 
        timeout = inflight = 0;
        while (!kthread_should_park()) {
-               bool all_fixed, mm_fault = false;
-               int i;
+               bool mm_fault = false;
+               unsigned int to_submit;
 
                if (inflight) {
                        unsigned nr_events = 0;
 
                        if (ctx->flags & IORING_SETUP_IOPOLL) {
-                               io_iopoll_check(ctx, &nr_events, 0);
+                               /*
+                                * inflight is the count of the maximum possible
+                                * entries we submitted, but it can be smaller
+                                * if we dropped some of them. If we don't have
+                                * poll entries available, then we know that we
+                                * have nothing left to poll for. Reset the
+                                * inflight count to zero in that case.
+                                */
+                               mutex_lock(&ctx->uring_lock);
+                               if (!list_empty(&ctx->poll_list))
+                                       __io_iopoll_check(ctx, &nr_events, 0);
+                               else
+                                       inflight = 0;
+                               mutex_unlock(&ctx->uring_lock);
                        } else {
                                /*
                                 * Normal IO, just pretend everything completed.
@@ -2608,7 +2693,8 @@ static int io_sq_thread(void *data)
                                timeout = jiffies + ctx->sq_thread_idle;
                }
 
-               if (!io_get_sqring(ctx, &sqes[0])) {
+               to_submit = io_sqring_entries(ctx);
+               if (!to_submit) {
                        /*
                         * We're polling. If we're within the defined idle
                         * period, then let us spin without work before going
@@ -2639,7 +2725,8 @@ static int io_sq_thread(void *data)
                        /* make sure to read SQ tail after writing flags */
                        smp_mb();
 
-                       if (!io_get_sqring(ctx, &sqes[0])) {
+                       to_submit = io_sqring_entries(ctx);
+                       if (!to_submit) {
                                if (kthread_should_park()) {
                                        finish_wait(&ctx->sqo_wait, &wait);
                                        break;
@@ -2657,19 +2744,8 @@ static int io_sq_thread(void *data)
                        ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
                }
 
-               i = 0;
-               all_fixed = true;
-               do {
-                       if (all_fixed && io_sqe_needs_user(sqes[i].sqe))
-                               all_fixed = false;
-
-                       i++;
-                       if (i == ARRAY_SIZE(sqes))
-                               break;
-               } while (io_get_sqring(ctx, &sqes[i]));
-
                /* Unless all new commands are FIXED regions, grab mm */
-               if (!all_fixed && !cur_mm) {
+               if (!cur_mm) {
                        mm_fault = !mmget_not_zero(ctx->sqo_mm);
                        if (!mm_fault) {
                                use_mm(ctx->sqo_mm);
@@ -2677,8 +2753,9 @@ static int io_sq_thread(void *data)
                        }
                }
 
-               inflight += io_submit_sqes(ctx, sqes, i, cur_mm != NULL,
-                                               mm_fault);
+               to_submit = min(to_submit, ctx->sq_entries);
+               inflight += io_submit_sqes(ctx, to_submit, cur_mm != NULL,
+                                          mm_fault);
 
                /* Commit SQ ring head once we've consumed all SQEs */
                io_commit_sqring(ctx);
@@ -2695,8 +2772,7 @@ static int io_sq_thread(void *data)
        return 0;
 }
 
-static int io_ring_submit(struct io_ring_ctx *ctx, unsigned int to_submit,
-                         bool block_for_last)
+static int io_ring_submit(struct io_ring_ctx *ctx, unsigned int to_submit)
 {
        struct io_submit_state state, *statep = NULL;
        struct io_kiocb *link = NULL;
@@ -2710,7 +2786,6 @@ static int io_ring_submit(struct io_ring_ctx *ctx, unsigned int to_submit,
        }
 
        for (i = 0; i < to_submit; i++) {
-               bool force_nonblock = true;
                struct sqe_submit s;
 
                if (!io_get_sqring(ctx, &s))
@@ -2721,8 +2796,7 @@ static int io_ring_submit(struct io_ring_ctx *ctx, unsigned int to_submit,
                 * that's the end of the chain. Submit the previous link.
                 */
                if (!prev_was_link && link) {
-                       io_queue_link_head(ctx, link, &link->submit, shadow_req,
-                                               force_nonblock);
+                       io_queue_link_head(ctx, link, &link->submit, shadow_req);
                        link = NULL;
                        shadow_req = NULL;
                }
@@ -2744,27 +2818,16 @@ out:
                s.needs_lock = false;
                s.needs_fixed_file = false;
                submit++;
-
-               /*
-                * The caller will block for events after submit, submit the
-                * last IO non-blocking. This is either the only IO it's
-                * submitting, or it already submitted the previous ones. This
-                * improves performance by avoiding an async punt that we don't
-                * need to do.
-                */
-               if (block_for_last && submit == to_submit)
-                       force_nonblock = false;
-
-               io_submit_sqe(ctx, &s, statep, &link, force_nonblock);
+               io_submit_sqe(ctx, &s, statep, &link);
        }
-       io_commit_sqring(ctx);
 
        if (link)
-               io_queue_link_head(ctx, link, &link->submit, shadow_req,
-                                       block_for_last);
+               io_queue_link_head(ctx, link, &link->submit, shadow_req);
        if (statep)
                io_submit_state_end(statep);
 
+       io_commit_sqring(ctx);
+
        return submit;
 }
 
@@ -2920,8 +2983,12 @@ static void io_finish_async(struct io_ring_ctx *ctx)
 static void io_destruct_skb(struct sk_buff *skb)
 {
        struct io_ring_ctx *ctx = skb->sk->sk_user_data;
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(ctx->sqo_wq); i++)
+               if (ctx->sqo_wq[i])
+                       flush_workqueue(ctx->sqo_wq[i]);
 
-       io_finish_async(ctx);
        unix_destruct_scm(skb);
 }
 
@@ -3601,21 +3668,10 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
                        wake_up(&ctx->sqo_wait);
                submitted = to_submit;
        } else if (to_submit) {
-               bool block_for_last = false;
-
                to_submit = min(to_submit, ctx->sq_entries);
 
-               /*
-                * Allow last submission to block in a series, IFF the caller
-                * asked to wait for events and we don't currently have
-                * enough. This potentially avoids an async punt.
-                */
-               if (to_submit == min_complete &&
-                   io_cqring_events(ctx->rings) < min_complete)
-                       block_for_last = true;
-
                mutex_lock(&ctx->uring_lock);
-               submitted = io_ring_submit(ctx, to_submit, block_for_last);
+               submitted = io_ring_submit(ctx, to_submit);
                mutex_unlock(&ctx->uring_lock);
        }
        if (flags & IORING_ENTER_GETEVENTS) {
@@ -3630,7 +3686,7 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
                }
        }
 
-       io_ring_drop_ctx_refs(ctx, 1);
+       percpu_ref_put(&ctx->refs);
 out_fput:
        fdput(f);
        return submitted ? submitted : ret;
@@ -3774,10 +3830,6 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p)
        if (ret)
                goto err;
 
-       ret = io_uring_get_fd(ctx);
-       if (ret < 0)
-               goto err;
-
        memset(&p->sq_off, 0, sizeof(p->sq_off));
        p->sq_off.head = offsetof(struct io_rings, sq.head);
        p->sq_off.tail = offsetof(struct io_rings, sq.tail);
@@ -3795,6 +3847,14 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p)
        p->cq_off.overflow = offsetof(struct io_rings, cq_overflow);
        p->cq_off.cqes = offsetof(struct io_rings, cqes);
 
+       /*
+        * Install ring fd as the very last thing, so we don't risk someone
+        * having closed it before we finish setup
+        */
+       ret = io_uring_get_fd(ctx);
+       if (ret < 0)
+               goto err;
+
        p->features = IORING_FEAT_SINGLE_MMAP;
        return ret;
 err:
index c9b2850c0f7ce622443a960c2a1dbb9f03177c01..1463b038ffc4e6f34dd5e213820df57a67106640 100644 (file)
@@ -89,58 +89,45 @@ int dcache_dir_close(struct inode *inode, struct file *file)
 EXPORT_SYMBOL(dcache_dir_close);
 
 /* parent is locked at least shared */
-static struct dentry *next_positive(struct dentry *parent,
-                                   struct list_head *from,
-                                   int count)
+/*
+ * Returns an element of the siblings' list.
+ * We are looking for the <count>th positive dentry after <p>; if
+ * found, the dentry is grabbed and returned to the caller.
+ * If no such element exists, NULL is returned.
+ */
+static struct dentry *scan_positives(struct dentry *cursor,
+                                       struct list_head *p,
+                                       loff_t count,
+                                       struct dentry *last)
 {
-       unsigned *seq = &parent->d_inode->i_dir_seq, n;
-       struct dentry *res;
-       struct list_head *p;
-       bool skipped;
-       int i;
+       struct dentry *dentry = cursor->d_parent, *found = NULL;
 
-retry:
-       i = count;
-       skipped = false;
-       n = smp_load_acquire(seq) & ~1;
-       res = NULL;
-       rcu_read_lock();
-       for (p = from->next; p != &parent->d_subdirs; p = p->next) {
+       spin_lock(&dentry->d_lock);
+       while ((p = p->next) != &dentry->d_subdirs) {
                struct dentry *d = list_entry(p, struct dentry, d_child);
-               if (!simple_positive(d)) {
-                       skipped = true;
-               } else if (!--i) {
-                       res = d;
-                       break;
+               // we must at least skip cursors, to avoid livelocks
+               if (d->d_flags & DCACHE_DENTRY_CURSOR)
+                       continue;
+               if (simple_positive(d) && !--count) {
+                       spin_lock_nested(&d->d_lock, DENTRY_D_LOCK_NESTED);
+                       if (simple_positive(d))
+                               found = dget_dlock(d);
+                       spin_unlock(&d->d_lock);
+                       if (likely(found))
+                               break;
+                       count = 1;
+               }
+               if (need_resched()) {
+                       list_move(&cursor->d_child, p);
+                       p = &cursor->d_child;
+                       spin_unlock(&dentry->d_lock);
+                       cond_resched();
+                       spin_lock(&dentry->d_lock);
                }
        }
-       rcu_read_unlock();
-       if (skipped) {
-               smp_rmb();
-               if (unlikely(*seq != n))
-                       goto retry;
-       }
-       return res;
-}
-
-static void move_cursor(struct dentry *cursor, struct list_head *after)
-{
-       struct dentry *parent = cursor->d_parent;
-       unsigned n, *seq = &parent->d_inode->i_dir_seq;
-       spin_lock(&parent->d_lock);
-       for (;;) {
-               n = *seq;
-               if (!(n & 1) && cmpxchg(seq, n, n + 1) == n)
-                       break;
-               cpu_relax();
-       }
-       __list_del(cursor->d_child.prev, cursor->d_child.next);
-       if (after)
-               list_add(&cursor->d_child, after);
-       else
-               list_add_tail(&cursor->d_child, &parent->d_subdirs);
-       smp_store_release(seq, n + 2);
-       spin_unlock(&parent->d_lock);
+       spin_unlock(&dentry->d_lock);
+       dput(last);
+       return found;
 }
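
scan_positives() walks the parent's children looking for the <count>th positive entry, and it must skip other readers' cursor dentries or two concurrent directory readers could chase each other's cursors forever. A much-simplified userspace sketch of that scan, with the locking, refcounting and cond_resched() points left out:

/*
 * Standalone sketch (not kernel code) of the scan above: find the
 * <count>th "positive" entry in a children list while skipping cursor
 * entries, which are never emitted to readdir.
 */
#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct node {
	struct node *next;
	bool is_cursor;		/* another reader's cursor, never emitted */
	bool positive;		/* "positive" dentry: has an inode */
	const char *name;
};

static struct node *scan_positives(struct node *head, long count)
{
	for (struct node *n = head; n; n = n->next) {
		if (n->is_cursor)
			continue;	/* cursors must be skipped to avoid livelock */
		if (n->positive && --count == 0)
			return n;
	}
	return NULL;
}

int main(void)
{
	struct node c   = { NULL, true,  false, "cursor" };
	struct node b   = { &c,   false, true,  "b" };
	struct node neg = { &b,   false, false, "negative" };
	struct node a   = { &neg, false, true,  "a" };

	assert(scan_positives(&a, 1) == &a);	/* 1st positive */
	assert(scan_positives(&a, 2) == &b);	/* negative and cursor skipped */
	assert(scan_positives(&a, 3) == NULL);
	printf("scan skips cursors and negatives as expected\n");
	return 0;
}
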
 
 loff_t dcache_dir_lseek(struct file *file, loff_t offset, int whence)
@@ -158,17 +145,25 @@ loff_t dcache_dir_lseek(struct file *file, loff_t offset, int whence)
                        return -EINVAL;
        }
        if (offset != file->f_pos) {
+               struct dentry *cursor = file->private_data;
+               struct dentry *to = NULL;
+
+               inode_lock_shared(dentry->d_inode);
+
+               if (offset > 2)
+                       to = scan_positives(cursor, &dentry->d_subdirs,
+                                           offset - 2, NULL);
+               spin_lock(&dentry->d_lock);
+               if (to)
+                       list_move(&cursor->d_child, &to->d_child);
+               else
+                       list_del_init(&cursor->d_child);
+               spin_unlock(&dentry->d_lock);
+               dput(to);
+
                file->f_pos = offset;
-               if (file->f_pos >= 2) {
-                       struct dentry *cursor = file->private_data;
-                       struct dentry *to;
-                       loff_t n = file->f_pos - 2;
-
-                       inode_lock_shared(dentry->d_inode);
-                       to = next_positive(dentry, &dentry->d_subdirs, n);
-                       move_cursor(cursor, to ? &to->d_child : NULL);
-                       inode_unlock_shared(dentry->d_inode);
-               }
+
+               inode_unlock_shared(dentry->d_inode);
        }
        return offset;
 }
@@ -190,25 +185,35 @@ int dcache_readdir(struct file *file, struct dir_context *ctx)
 {
        struct dentry *dentry = file->f_path.dentry;
        struct dentry *cursor = file->private_data;
-       struct list_head *p = &cursor->d_child;
-       struct dentry *next;
-       bool moved = false;
+       struct list_head *anchor = &dentry->d_subdirs;
+       struct dentry *next = NULL;
+       struct list_head *p;
 
        if (!dir_emit_dots(file, ctx))
                return 0;
 
        if (ctx->pos == 2)
-               p = &dentry->d_subdirs;
-       while ((next = next_positive(dentry, p, 1)) != NULL) {
+               p = anchor;
+       else if (!list_empty(&cursor->d_child))
+               p = &cursor->d_child;
+       else
+               return 0;
+
+       while ((next = scan_positives(cursor, p, 1, next)) != NULL) {
                if (!dir_emit(ctx, next->d_name.name, next->d_name.len,
                              d_inode(next)->i_ino, dt_type(d_inode(next))))
                        break;
-               moved = true;
-               p = &next->d_child;
                ctx->pos++;
+               p = &next->d_child;
        }
-       if (moved)
-               move_cursor(cursor, p);
+       spin_lock(&dentry->d_lock);
+       if (next)
+               list_move_tail(&cursor->d_child, &next->d_child);
+       else
+               list_del_init(&cursor->d_child);
+       spin_unlock(&dentry->d_lock);
+       dput(next);
+
        return 0;
 }
 EXPORT_SYMBOL(dcache_readdir);
@@ -468,8 +473,7 @@ EXPORT_SYMBOL(simple_write_begin);
 
 /**
  * simple_write_end - .write_end helper for non-block-device FSes
- * @available: See .write_end of address_space_operations
- * @file:              "
+ * @file: See .write_end of address_space_operations
  * @mapping:           "
  * @pos:               "
  * @len:               "
index 071b90a45933a4b95c176e8042a29130e07a71f1..af549d70ec507e3f8deb920c3a93509bd59a5ed2 100644 (file)
@@ -53,6 +53,16 @@ nfs4_is_valid_delegation(const struct nfs_delegation *delegation,
        return false;
 }
 
+struct nfs_delegation *nfs4_get_valid_delegation(const struct inode *inode)
+{
+       struct nfs_delegation *delegation;
+
+       delegation = rcu_dereference(NFS_I(inode)->delegation);
+       if (nfs4_is_valid_delegation(delegation, 0))
+               return delegation;
+       return NULL;
+}
+
 static int
 nfs4_do_check_delegation(struct inode *inode, fmode_t flags, bool mark)
 {
@@ -1181,7 +1191,7 @@ bool nfs4_refresh_delegation_stateid(nfs4_stateid *dst, struct inode *inode)
        if (delegation != NULL &&
            nfs4_stateid_match_other(dst, &delegation->stateid)) {
                dst->seqid = delegation->stateid.seqid;
-               return ret;
+               ret = true;
        }
        rcu_read_unlock();
 out:
index 9eb87ae4c98276632ab799e5b66487df9cdcd3e4..8b14d441e699bc472a4a4c60c5e969384aca0233 100644 (file)
@@ -68,6 +68,7 @@ int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state,
 bool nfs4_copy_delegation_stateid(struct inode *inode, fmode_t flags, nfs4_stateid *dst, const struct cred **cred);
 bool nfs4_refresh_delegation_stateid(nfs4_stateid *dst, struct inode *inode);
 
+struct nfs_delegation *nfs4_get_valid_delegation(const struct inode *inode);
 void nfs_mark_delegation_referenced(struct nfs_delegation *delegation);
 int nfs4_have_delegation(struct inode *inode, fmode_t flags);
 int nfs4_check_delegation(struct inode *inode, fmode_t flags);
index 222d7115db713a76d1e4312ec1c22752e38f3709..040a50fd9bf307d9e2bdd25643a3db9f73dcb64e 100644 (file)
 
 static struct kmem_cache *nfs_direct_cachep;
 
-/*
- * This represents a set of asynchronous requests that we're waiting on
- */
-struct nfs_direct_mirror {
-       ssize_t count;
-};
-
 struct nfs_direct_req {
        struct kref             kref;           /* release manager */
 
@@ -84,9 +77,6 @@ struct nfs_direct_req {
        atomic_t                io_count;       /* i/os we're waiting for */
        spinlock_t              lock;           /* protect completion state */
 
-       struct nfs_direct_mirror mirrors[NFS_PAGEIO_DESCRIPTOR_MIRROR_MAX];
-       int                     mirror_count;
-
        loff_t                  io_start;       /* Start offset for I/O */
        ssize_t                 count,          /* bytes actually processed */
                                max_count,      /* max expected count */
@@ -123,32 +113,42 @@ static inline int put_dreq(struct nfs_direct_req *dreq)
 }
 
 static void
-nfs_direct_good_bytes(struct nfs_direct_req *dreq, struct nfs_pgio_header *hdr)
+nfs_direct_handle_truncated(struct nfs_direct_req *dreq,
+                           const struct nfs_pgio_header *hdr,
+                           ssize_t dreq_len)
 {
-       int i;
-       ssize_t count;
+       if (!(test_bit(NFS_IOHDR_ERROR, &hdr->flags) ||
+             test_bit(NFS_IOHDR_EOF, &hdr->flags)))
+               return;
+       if (dreq->max_count >= dreq_len) {
+               dreq->max_count = dreq_len;
+               if (dreq->count > dreq_len)
+                       dreq->count = dreq_len;
+
+               if (test_bit(NFS_IOHDR_ERROR, &hdr->flags))
+                       dreq->error = hdr->error;
+               else /* Clear outstanding error if this is EOF */
+                       dreq->error = 0;
+       }
+}
 
-       WARN_ON_ONCE(dreq->count >= dreq->max_count);
+static void
+nfs_direct_count_bytes(struct nfs_direct_req *dreq,
+                      const struct nfs_pgio_header *hdr)
+{
+       loff_t hdr_end = hdr->io_start + hdr->good_bytes;
+       ssize_t dreq_len = 0;
 
-       if (dreq->mirror_count == 1) {
-               dreq->mirrors[hdr->pgio_mirror_idx].count += hdr->good_bytes;
-               dreq->count += hdr->good_bytes;
-       } else {
-               /* mirrored writes */
-               count = dreq->mirrors[hdr->pgio_mirror_idx].count;
-               if (count + dreq->io_start < hdr->io_start + hdr->good_bytes) {
-                       count = hdr->io_start + hdr->good_bytes - dreq->io_start;
-                       dreq->mirrors[hdr->pgio_mirror_idx].count = count;
-               }
-               /* update the dreq->count by finding the minimum agreed count from all
-                * mirrors */
-               count = dreq->mirrors[0].count;
+       if (hdr_end > dreq->io_start)
+               dreq_len = hdr_end - dreq->io_start;
 
-               for (i = 1; i < dreq->mirror_count; i++)
-                       count = min(count, dreq->mirrors[i].count);
+       nfs_direct_handle_truncated(dreq, hdr, dreq_len);
 
-               dreq->count = count;
-       }
+       if (dreq_len > dreq->max_count)
+               dreq_len = dreq->max_count;
+
+       if (dreq->count < dreq_len)
+               dreq->count = dreq_len;
 }
 
 /*
@@ -293,18 +293,6 @@ void nfs_init_cinfo_from_dreq(struct nfs_commit_info *cinfo,
        cinfo->completion_ops = &nfs_direct_commit_completion_ops;
 }
 
-static inline void nfs_direct_setup_mirroring(struct nfs_direct_req *dreq,
-                                            struct nfs_pageio_descriptor *pgio,
-                                            struct nfs_page *req)
-{
-       int mirror_count = 1;
-
-       if (pgio->pg_ops->pg_get_mirror_count)
-               mirror_count = pgio->pg_ops->pg_get_mirror_count(pgio, req);
-
-       dreq->mirror_count = mirror_count;
-}
-
 static inline struct nfs_direct_req *nfs_direct_req_alloc(void)
 {
        struct nfs_direct_req *dreq;
@@ -319,7 +307,6 @@ static inline struct nfs_direct_req *nfs_direct_req_alloc(void)
        INIT_LIST_HEAD(&dreq->mds_cinfo.list);
        dreq->verf.committed = NFS_INVALID_STABLE_HOW;  /* not set yet */
        INIT_WORK(&dreq->work, nfs_direct_write_schedule_work);
-       dreq->mirror_count = 1;
        spin_lock_init(&dreq->lock);
 
        return dreq;
@@ -402,20 +389,12 @@ static void nfs_direct_read_completion(struct nfs_pgio_header *hdr)
        struct nfs_direct_req *dreq = hdr->dreq;
 
        spin_lock(&dreq->lock);
-       if (test_bit(NFS_IOHDR_ERROR, &hdr->flags))
-               dreq->error = hdr->error;
-
        if (test_bit(NFS_IOHDR_REDO, &hdr->flags)) {
                spin_unlock(&dreq->lock);
                goto out_put;
        }
 
-       if (hdr->good_bytes != 0)
-               nfs_direct_good_bytes(dreq, hdr);
-
-       if (test_bit(NFS_IOHDR_EOF, &hdr->flags))
-               dreq->error = 0;
-
+       nfs_direct_count_bytes(dreq, hdr);
        spin_unlock(&dreq->lock);
 
        while (!list_empty(&hdr->pages)) {
@@ -646,29 +625,22 @@ static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
        LIST_HEAD(reqs);
        struct nfs_commit_info cinfo;
        LIST_HEAD(failed);
-       int i;
 
        nfs_init_cinfo_from_dreq(&cinfo, dreq);
        nfs_direct_write_scan_commit_list(dreq->inode, &reqs, &cinfo);
 
        dreq->count = 0;
+       dreq->max_count = 0;
+       list_for_each_entry(req, &reqs, wb_list)
+               dreq->max_count += req->wb_bytes;
        dreq->verf.committed = NFS_INVALID_STABLE_HOW;
        nfs_clear_pnfs_ds_commit_verifiers(&dreq->ds_cinfo);
-       for (i = 0; i < dreq->mirror_count; i++)
-               dreq->mirrors[i].count = 0;
        get_dreq(dreq);
 
        nfs_pageio_init_write(&desc, dreq->inode, FLUSH_STABLE, false,
                              &nfs_direct_write_completion_ops);
        desc.pg_dreq = dreq;
 
-       req = nfs_list_entry(reqs.next);
-       nfs_direct_setup_mirroring(dreq, &desc, req);
-       if (desc.pg_error < 0) {
-               list_splice_init(&reqs, &failed);
-               goto out_failed;
-       }
-
        list_for_each_entry_safe(req, tmp, &reqs, wb_list) {
                /* Bump the transmission count */
                req->wb_nio++;
@@ -686,7 +658,6 @@ static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
        }
        nfs_pageio_complete(&desc);
 
-out_failed:
        while (!list_empty(&failed)) {
                req = nfs_list_entry(failed.next);
                nfs_list_remove_request(req);
@@ -791,17 +762,13 @@ static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
        nfs_init_cinfo_from_dreq(&cinfo, dreq);
 
        spin_lock(&dreq->lock);
-
-       if (test_bit(NFS_IOHDR_ERROR, &hdr->flags))
-               dreq->error = hdr->error;
-
        if (test_bit(NFS_IOHDR_REDO, &hdr->flags)) {
                spin_unlock(&dreq->lock);
                goto out_put;
        }
 
+       nfs_direct_count_bytes(dreq, hdr);
        if (hdr->good_bytes != 0) {
-               nfs_direct_good_bytes(dreq, hdr);
                if (nfs_write_need_commit(hdr)) {
                        if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES)
                                request_commit = true;
@@ -923,7 +890,6 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
                                break;
                        }
 
-                       nfs_direct_setup_mirroring(dreq, &desc, req);
                        if (desc.pg_error < 0) {
                                nfs_free_request(req);
                                result = desc.pg_error;
index 11eafcfc490b93df29ec00829ccd1cde34410df7..caacf5e7f5e1568474feb57f6e4d85aa19efdc8b 100644 (file)
@@ -1440,8 +1440,6 @@ static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode,
                return 0;
        if ((delegation->type & fmode) != fmode)
                return 0;
-       if (test_bit(NFS_DELEGATION_RETURNING, &delegation->flags))
-               return 0;
        switch (claim) {
        case NFS4_OPEN_CLAIM_NULL:
        case NFS4_OPEN_CLAIM_FH:
@@ -1810,7 +1808,6 @@ static void nfs4_return_incompatible_delegation(struct inode *inode, fmode_t fmo
 static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata)
 {
        struct nfs4_state *state = opendata->state;
-       struct nfs_inode *nfsi = NFS_I(state->inode);
        struct nfs_delegation *delegation;
        int open_mode = opendata->o_arg.open_flags;
        fmode_t fmode = opendata->o_arg.fmode;
@@ -1827,7 +1824,7 @@ static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata)
                }
                spin_unlock(&state->owner->so_lock);
                rcu_read_lock();
-               delegation = rcu_dereference(nfsi->delegation);
+               delegation = nfs4_get_valid_delegation(state->inode);
                if (!can_open_delegated(delegation, fmode, claim)) {
                        rcu_read_unlock();
                        break;
@@ -2371,7 +2368,7 @@ static void nfs4_open_prepare(struct rpc_task *task, void *calldata)
                                        data->o_arg.open_flags, claim))
                        goto out_no_action;
                rcu_read_lock();
-               delegation = rcu_dereference(NFS_I(data->state->inode)->delegation);
+               delegation = nfs4_get_valid_delegation(data->state->inode);
                if (can_open_delegated(delegation, data->o_arg.fmode, claim))
                        goto unlock_no_action;
                rcu_read_unlock();
@@ -6106,6 +6103,7 @@ int nfs4_proc_setclientid(struct nfs_client *clp, u32 program,
 
        status = nfs4_call_sync_custom(&task_setup_data);
        if (setclientid.sc_cred) {
+               kfree(clp->cl_acceptor);
                clp->cl_acceptor = rpcauth_stringify_acceptor(setclientid.sc_cred);
                put_rpccred(setclientid.sc_cred);
        }
index 85ca49549b39bdf5b9bea4e65dbb52d15945231b..52cab65f91cf08460a926d45c1dddb40cc885a69 100644 (file)
@@ -786,7 +786,6 @@ static void nfs_inode_remove_request(struct nfs_page *req)
        struct nfs_inode *nfsi = NFS_I(inode);
        struct nfs_page *head;
 
-       atomic_long_dec(&nfsi->nrequests);
        if (nfs_page_group_sync_on_bit(req, PG_REMOVE)) {
                head = req->wb_head;
 
@@ -799,8 +798,10 @@ static void nfs_inode_remove_request(struct nfs_page *req)
                spin_unlock(&mapping->private_lock);
        }
 
-       if (test_and_clear_bit(PG_INODE_REF, &req->wb_flags))
+       if (test_and_clear_bit(PG_INODE_REF, &req->wb_flags)) {
                nfs_release_request(req);
+               atomic_long_dec(&nfsi->nrequests);
+       }
 }
 
 static void
index 8de1c9d644f62ca203ad33d7b5cda3613309f866..9cd0a68159337add337d9015776453edc4ffb0aa 100644 (file)
@@ -2049,7 +2049,8 @@ out_write_size:
                inode->i_mtime = inode->i_ctime = current_time(inode);
                di->i_mtime = di->i_ctime = cpu_to_le64(inode->i_mtime.tv_sec);
                di->i_mtime_nsec = di->i_ctime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
-               ocfs2_update_inode_fsync_trans(handle, inode, 1);
+               if (handle)
+                       ocfs2_update_inode_fsync_trans(handle, inode, 1);
        }
        if (handle)
                ocfs2_journal_dirty(handle, wc->w_di_bh);
@@ -2146,13 +2147,30 @@ static int ocfs2_dio_wr_get_block(struct inode *inode, sector_t iblock,
        struct ocfs2_dio_write_ctxt *dwc = NULL;
        struct buffer_head *di_bh = NULL;
        u64 p_blkno;
-       loff_t pos = iblock << inode->i_sb->s_blocksize_bits;
+       unsigned int i_blkbits = inode->i_sb->s_blocksize_bits;
+       loff_t pos = iblock << i_blkbits;
+       sector_t endblk = (i_size_read(inode) - 1) >> i_blkbits;
        unsigned len, total_len = bh_result->b_size;
        int ret = 0, first_get_block = 0;
 
        len = osb->s_clustersize - (pos & (osb->s_clustersize - 1));
        len = min(total_len, len);
 
+       /*
+        * bh_result->b_size is the count computed in get_more_blocks() from
+        * the write "pos" and "end"; we need to map twice to return distinct
+        * buffer states: 1. the area inside the file size is not set NEW;
+        * 2. the area beyond the file size is set NEW.
+        *
+        *                 iblock    endblk
+        * |--------|---------|---------|---------
+        * |<-------area in file------->|
+        */
+
+       if ((iblock <= endblk) &&
+           ((iblock + ((len - 1) >> i_blkbits)) > endblk))
+               len = (endblk - iblock + 1) << i_blkbits;
+
        mlog(0, "get block of %lu at %llu:%u req %u\n",
                        inode->i_ino, pos, len, total_len);
 
@@ -2236,6 +2254,9 @@ static int ocfs2_dio_wr_get_block(struct inode *inode, sector_t iblock,
        if (desc->c_needs_zero)
                set_buffer_new(bh_result);
 
+       if (iblock > endblk)
+               set_buffer_new(bh_result);
+
        /* May sleep in end_io. It should not happen in a irq context. So defer
         * it to dio work queue. */
        set_buffer_defer_completion(bh_result);
index 2e982db3e1ae4b55eb8bc741b9ca9400c791afad..9876db52913a4fda2a67ecf746a553bd0728eea1 100644 (file)
@@ -1230,6 +1230,7 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
                        transfer_to[USRQUOTA] = dqget(sb, make_kqid_uid(attr->ia_uid));
                        if (IS_ERR(transfer_to[USRQUOTA])) {
                                status = PTR_ERR(transfer_to[USRQUOTA]);
+                               transfer_to[USRQUOTA] = NULL;
                                goto bail_unlock;
                        }
                }
@@ -1239,6 +1240,7 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
                        transfer_to[GRPQUOTA] = dqget(sb, make_kqid_gid(attr->ia_gid));
                        if (IS_ERR(transfer_to[GRPQUOTA])) {
                                status = PTR_ERR(transfer_to[GRPQUOTA]);
+                               transfer_to[GRPQUOTA] = NULL;
                                goto bail_unlock;
                        }
                }
@@ -2096,53 +2098,89 @@ static int ocfs2_is_io_unaligned(struct inode *inode, size_t count, loff_t pos)
        return 0;
 }
 
-static int ocfs2_prepare_inode_for_refcount(struct inode *inode,
-                                           struct file *file,
-                                           loff_t pos, size_t count,
-                                           int *meta_level)
+static int ocfs2_inode_lock_for_extent_tree(struct inode *inode,
+                                           struct buffer_head **di_bh,
+                                           int meta_level,
+                                           int overwrite_io,
+                                           int write_sem,
+                                           int wait)
 {
-       int ret;
-       struct buffer_head *di_bh = NULL;
-       u32 cpos = pos >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
-       u32 clusters =
-               ocfs2_clusters_for_bytes(inode->i_sb, pos + count) - cpos;
+       int ret = 0;
 
-       ret = ocfs2_inode_lock(inode, &di_bh, 1);
-       if (ret) {
-               mlog_errno(ret);
+       if (wait)
+               ret = ocfs2_inode_lock(inode, NULL, meta_level);
+       else
+               ret = ocfs2_try_inode_lock(inode,
+                       overwrite_io ? NULL : di_bh, meta_level);
+       if (ret < 0)
                goto out;
+
+       if (wait) {
+               if (write_sem)
+                       down_write(&OCFS2_I(inode)->ip_alloc_sem);
+               else
+                       down_read(&OCFS2_I(inode)->ip_alloc_sem);
+       } else {
+               if (write_sem)
+                       ret = down_write_trylock(&OCFS2_I(inode)->ip_alloc_sem);
+               else
+                       ret = down_read_trylock(&OCFS2_I(inode)->ip_alloc_sem);
+
+               if (!ret) {
+                       ret = -EAGAIN;
+                       goto out_unlock;
+               }
        }
 
-       *meta_level = 1;
+       return ret;
 
-       ret = ocfs2_refcount_cow(inode, di_bh, cpos, clusters, UINT_MAX);
-       if (ret)
-               mlog_errno(ret);
+out_unlock:
+       brelse(*di_bh);
+       ocfs2_inode_unlock(inode, meta_level);
 out:
-       brelse(di_bh);
        return ret;
 }
 
+static void ocfs2_inode_unlock_for_extent_tree(struct inode *inode,
+                                              struct buffer_head **di_bh,
+                                              int meta_level,
+                                              int write_sem)
+{
+       if (write_sem)
+               up_write(&OCFS2_I(inode)->ip_alloc_sem);
+       else
+               up_read(&OCFS2_I(inode)->ip_alloc_sem);
+
+       brelse(*di_bh);
+       *di_bh = NULL;
+
+       if (meta_level >= 0)
+               ocfs2_inode_unlock(inode, meta_level);
+}
+
 static int ocfs2_prepare_inode_for_write(struct file *file,
                                         loff_t pos, size_t count, int wait)
 {
        int ret = 0, meta_level = 0, overwrite_io = 0;
+       int write_sem = 0;
        struct dentry *dentry = file->f_path.dentry;
        struct inode *inode = d_inode(dentry);
        struct buffer_head *di_bh = NULL;
+       u32 cpos;
+       u32 clusters;
 
        /*
         * We start with a read level meta lock and only jump to an ex
         * if we need to make modifications here.
         */
        for(;;) {
-               if (wait)
-                       ret = ocfs2_inode_lock(inode, NULL, meta_level);
-               else
-                       ret = ocfs2_try_inode_lock(inode,
-                               overwrite_io ? NULL : &di_bh, meta_level);
+               ret = ocfs2_inode_lock_for_extent_tree(inode,
+                                                      &di_bh,
+                                                      meta_level,
+                                                      overwrite_io,
+                                                      write_sem,
+                                                      wait);
                if (ret < 0) {
-                       meta_level = -1;
                        if (ret != -EAGAIN)
                                mlog_errno(ret);
                        goto out;
@@ -2154,15 +2192,8 @@ static int ocfs2_prepare_inode_for_write(struct file *file,
                 */
                if (!wait && !overwrite_io) {
                        overwrite_io = 1;
-                       if (!down_read_trylock(&OCFS2_I(inode)->ip_alloc_sem)) {
-                               ret = -EAGAIN;
-                               goto out_unlock;
-                       }
 
                        ret = ocfs2_overwrite_io(inode, di_bh, pos, count);
-                       brelse(di_bh);
-                       di_bh = NULL;
-                       up_read(&OCFS2_I(inode)->ip_alloc_sem);
                        if (ret < 0) {
                                if (ret != -EAGAIN)
                                        mlog_errno(ret);
@@ -2181,7 +2212,10 @@ static int ocfs2_prepare_inode_for_write(struct file *file,
                 * set inode->i_size at the end of a write. */
                if (should_remove_suid(dentry)) {
                        if (meta_level == 0) {
-                               ocfs2_inode_unlock(inode, meta_level);
+                               ocfs2_inode_unlock_for_extent_tree(inode,
+                                                                  &di_bh,
+                                                                  meta_level,
+                                                                  write_sem);
                                meta_level = 1;
                                continue;
                        }
@@ -2195,18 +2229,32 @@ static int ocfs2_prepare_inode_for_write(struct file *file,
 
                ret = ocfs2_check_range_for_refcount(inode, pos, count);
                if (ret == 1) {
-                       ocfs2_inode_unlock(inode, meta_level);
-                       meta_level = -1;
-
-                       ret = ocfs2_prepare_inode_for_refcount(inode,
-                                                              file,
-                                                              pos,
-                                                              count,
-                                                              &meta_level);
+                       ocfs2_inode_unlock_for_extent_tree(inode,
+                                                          &di_bh,
+                                                          meta_level,
+                                                          write_sem);
+                       ret = ocfs2_inode_lock_for_extent_tree(inode,
+                                                              &di_bh,
+                                                              meta_level,
+                                                              overwrite_io,
+                                                              1,
+                                                              wait);
+                       write_sem = 1;
+                       if (ret < 0) {
+                               if (ret != -EAGAIN)
+                                       mlog_errno(ret);
+                               goto out;
+                       }
+
+                       cpos = pos >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
+                       clusters =
+                               ocfs2_clusters_for_bytes(inode->i_sb, pos + count) - cpos;
+                       ret = ocfs2_refcount_cow(inode, di_bh, cpos, clusters, UINT_MAX);
                }
 
                if (ret < 0) {
-                       mlog_errno(ret);
+                       if (ret != -EAGAIN)
+                               mlog_errno(ret);
                        goto out_unlock;
                }
 
@@ -2217,10 +2265,10 @@ out_unlock:
        trace_ocfs2_prepare_inode_for_write(OCFS2_I(inode)->ip_blkno,
                                            pos, count, wait);
 
-       brelse(di_bh);
-
-       if (meta_level >= 0)
-               ocfs2_inode_unlock(inode, meta_level);
+       ocfs2_inode_unlock_for_extent_tree(inode,
+                                          &di_bh,
+                                          meta_level,
+                                          write_sem);
 
 out:
        return ret;
index d6f7b299eb236d4b176e653babdb4f1261cb9584..efeea208fdebd6541c11983fe91761bb83f307cb 100644 (file)
@@ -283,7 +283,7 @@ static int ocfs2_info_scan_inode_alloc(struct ocfs2_super *osb,
        if (inode_alloc)
                inode_lock(inode_alloc);
 
-       if (o2info_coherent(&fi->ifi_req)) {
+       if (inode_alloc && o2info_coherent(&fi->ifi_req)) {
                status = ocfs2_inode_lock(inode_alloc, &bh, 0);
                if (status < 0) {
                        mlog_errno(status);
index 930e3d388579138cc22016ede4393311ea8e54a3..699a560efbb02b498b01ba2df49f5b334766fa59 100644 (file)
@@ -217,7 +217,8 @@ void ocfs2_recovery_exit(struct ocfs2_super *osb)
        /* At this point, we know that no more recovery threads can be
         * launched, so wait for any recovery completion work to
         * complete. */
-       flush_workqueue(osb->ocfs2_wq);
+       if (osb->ocfs2_wq)
+               flush_workqueue(osb->ocfs2_wq);
 
        /*
         * Now that recovery is shut down, and the osb is about to be
index 158e5af767fd11119b39947586cdb364f4c6a6d2..720e9f94957e39047d434bfb3c1a2549a50056fc 100644 (file)
@@ -377,7 +377,8 @@ void ocfs2_shutdown_local_alloc(struct ocfs2_super *osb)
        struct ocfs2_dinode *alloc = NULL;
 
        cancel_delayed_work(&osb->la_enable_wq);
-       flush_workqueue(osb->ocfs2_wq);
+       if (osb->ocfs2_wq)
+               flush_workqueue(osb->ocfs2_wq);
 
        if (osb->local_alloc_state == OCFS2_LA_UNUSED)
                goto out;
index 90c830e3758e2dea9af508efed08b48a8509553f..d8507972ee135f95a4ff9bcc696cc456c2977ee5 100644 (file)
@@ -1490,18 +1490,6 @@ static int ocfs2_xa_check_space(struct ocfs2_xa_loc *loc,
        return loc->xl_ops->xlo_check_space(loc, xi);
 }
 
-static void ocfs2_xa_add_entry(struct ocfs2_xa_loc *loc, u32 name_hash)
-{
-       loc->xl_ops->xlo_add_entry(loc, name_hash);
-       loc->xl_entry->xe_name_hash = cpu_to_le32(name_hash);
-       /*
-        * We can't leave the new entry's xe_name_offset at zero or
-        * add_namevalue() will go nuts.  We set it to the size of our
-        * storage so that it can never be less than any other entry.
-        */
-       loc->xl_entry->xe_name_offset = cpu_to_le16(loc->xl_size);
-}
-
 static void ocfs2_xa_add_namevalue(struct ocfs2_xa_loc *loc,
                                   struct ocfs2_xattr_info *xi)
 {
@@ -2133,29 +2121,31 @@ static int ocfs2_xa_prepare_entry(struct ocfs2_xa_loc *loc,
        if (rc)
                goto out;
 
-       if (loc->xl_entry) {
-               if (ocfs2_xa_can_reuse_entry(loc, xi)) {
-                       orig_value_size = loc->xl_entry->xe_value_size;
-                       rc = ocfs2_xa_reuse_entry(loc, xi, ctxt);
-                       if (rc)
-                               goto out;
-                       goto alloc_value;
-               }
+       if (!loc->xl_entry) {
+               rc = -EINVAL;
+               goto out;
+       }
 
-               if (!ocfs2_xattr_is_local(loc->xl_entry)) {
-                       orig_clusters = ocfs2_xa_value_clusters(loc);
-                       rc = ocfs2_xa_value_truncate(loc, 0, ctxt);
-                       if (rc) {
-                               mlog_errno(rc);
-                               ocfs2_xa_cleanup_value_truncate(loc,
-                                                               "overwriting",
-                                                               orig_clusters);
-                               goto out;
-                       }
+       if (ocfs2_xa_can_reuse_entry(loc, xi)) {
+               orig_value_size = loc->xl_entry->xe_value_size;
+               rc = ocfs2_xa_reuse_entry(loc, xi, ctxt);
+               if (rc)
+                       goto out;
+               goto alloc_value;
+       }
+
+       if (!ocfs2_xattr_is_local(loc->xl_entry)) {
+               orig_clusters = ocfs2_xa_value_clusters(loc);
+               rc = ocfs2_xa_value_truncate(loc, 0, ctxt);
+               if (rc) {
+                       mlog_errno(rc);
+                       ocfs2_xa_cleanup_value_truncate(loc,
+                                                       "overwriting",
+                                                       orig_clusters);
+                       goto out;
                }
-               ocfs2_xa_wipe_namevalue(loc);
-       } else
-               ocfs2_xa_add_entry(loc, name_hash);
+       }
+       ocfs2_xa_wipe_namevalue(loc);
 
        /*
         * If we get here, we have a blank entry.  Fill it.  We grow our
index ac9247371871d9069687519b0cb06a21f30330e5..8c1f1bb1a5ce3fd1d9e757a87805fb3df638206c 100644 (file)
@@ -132,9 +132,9 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
                    global_node_page_state(NR_SHMEM_THPS) * HPAGE_PMD_NR);
        show_val_kb(m, "ShmemPmdMapped: ",
                    global_node_page_state(NR_SHMEM_PMDMAPPED) * HPAGE_PMD_NR);
-       show_val_kb(m, "FileHugePages: ",
+       show_val_kb(m, "FileHugePages:  ",
                    global_node_page_state(NR_FILE_THPS) * HPAGE_PMD_NR);
-       show_val_kb(m, "FilePmdMapped: ",
+       show_val_kb(m, "FilePmdMapped:  ",
                    global_node_page_state(NR_FILE_PMDMAPPED) * HPAGE_PMD_NR);
 #endif
 
index 544d1ee15aeece96efa3e5b39a9481c879584419..7c952ee732e63dffd0ae09f92a6ec3e6c57e5771 100644 (file)
@@ -42,10 +42,12 @@ static ssize_t kpagecount_read(struct file *file, char __user *buf,
                return -EINVAL;
 
        while (count > 0) {
-               if (pfn_valid(pfn))
-                       ppage = pfn_to_page(pfn);
-               else
-                       ppage = NULL;
+               /*
+                * TODO: ZONE_DEVICE support requires identifying
+                * memmaps that were actually initialized.
+                */
+               ppage = pfn_to_online_page(pfn);
+
                if (!ppage || PageSlab(ppage) || page_has_type(ppage))
                        pcount = 0;
                else
@@ -216,10 +218,11 @@ static ssize_t kpageflags_read(struct file *file, char __user *buf,
                return -EINVAL;
 
        while (count > 0) {
-               if (pfn_valid(pfn))
-                       ppage = pfn_to_page(pfn);
-               else
-                       ppage = NULL;
+               /*
+                * TODO: ZONE_DEVICE support requires identifying
+                * memmaps that were actually initialized.
+                */
+               ppage = pfn_to_online_page(pfn);
 
                if (put_user(stable_page_flags(ppage), out)) {
                        ret = -EFAULT;
@@ -261,10 +264,11 @@ static ssize_t kpagecgroup_read(struct file *file, char __user *buf,
                return -EINVAL;
 
        while (count > 0) {
-               if (pfn_valid(pfn))
-                       ppage = pfn_to_page(pfn);
-               else
-                       ppage = NULL;
+               /*
+                * TODO: ZONE_DEVICE support requires identifying
+                * memmaps that were actually initialized.
+                */
+               ppage = pfn_to_online_page(pfn);
 
                if (ppage)
                        ino = page_cgroup_ino(ppage);
index 2f6a4534e0dfeb644ccc005471826e4654cafe90..d26d5ea4de7b8958ea3d48069cd873b50398cd88 100644 (file)
 #include <linux/syscalls.h>
 #include <linux/unistd.h>
 #include <linux/compat.h>
-
 #include <linux/uaccess.h>
 
+#include <asm/unaligned.h>
+
+/*
+ * Note the "unsafe_put_user()" semantics: we goto a
+ * label for errors.
+ */
+#define unsafe_copy_dirent_name(_dst, _src, _len, label) do {  \
+       char __user *dst = (_dst);                              \
+       const char *src = (_src);                               \
+       size_t len = (_len);                                    \
+       unsafe_put_user(0, dst+len, label);                     \
+       unsafe_copy_to_user(dst, src, len, label);              \
+} while (0)
+
+
 int iterate_dir(struct file *file, struct dir_context *ctx)
 {
        struct inode *inode = file_inode(file);
@@ -64,6 +78,40 @@ out:
 }
 EXPORT_SYMBOL(iterate_dir);
 
+/*
+ * POSIX says that a dirent name cannot contain a NUL byte or a '/'.
+ *
+ * It's not 100% clear what we should really do in this case.
+ * The filesystem is clearly corrupted, but returning a hard
+ * error means that you now don't see any of the other names
+ * either, so that isn't a perfect alternative.
+ *
+ * And if you return an error, what error do you use? Several
+ * filesystems seem to have decided on EUCLEAN being the error
+ * code for EFSCORRUPTED, and that may be the error to use. Or
+ * just EIO, which is perhaps more obvious to users.
+ *
+ * In order to see the other file names in the directory, the
+ * caller might want to make this a "soft" error: skip the
+ * entry, and return the error at the end instead.
+ *
+ * Note that this should likely do a "memchr(name, 0, len)"
+ * check too, since that would be filesystem corruption as
+ * well. However, that case can't actually confuse user space,
+ * which has to do a strlen() on the name anyway to find the
+ * filename length, and the above "soft error" worry means
+ * that it's probably better left alone until we have that
+ * issue clarified.
+ */
+static int verify_dirent_name(const char *name, int len)
+{
+       if (!len)
+               return -EIO;
+       if (memchr(name, '/', len))
+               return -EIO;
+       return 0;
+}
+
 /*
  * Traditional linux readdir() handling..
  *
@@ -173,6 +221,9 @@ static int filldir(struct dir_context *ctx, const char *name, int namlen,
        int reclen = ALIGN(offsetof(struct linux_dirent, d_name) + namlen + 2,
                sizeof(long));
 
+       buf->error = verify_dirent_name(name, namlen);
+       if (unlikely(buf->error))
+               return buf->error;
        buf->error = -EINVAL;   /* only used if we fail.. */
        if (reclen > buf->count)
                return -EINVAL;
@@ -182,28 +233,31 @@ static int filldir(struct dir_context *ctx, const char *name, int namlen,
                return -EOVERFLOW;
        }
        dirent = buf->previous;
-       if (dirent) {
-               if (signal_pending(current))
-                       return -EINTR;
-               if (__put_user(offset, &dirent->d_off))
-                       goto efault;
-       }
-       dirent = buf->current_dir;
-       if (__put_user(d_ino, &dirent->d_ino))
-               goto efault;
-       if (__put_user(reclen, &dirent->d_reclen))
-               goto efault;
-       if (copy_to_user(dirent->d_name, name, namlen))
-               goto efault;
-       if (__put_user(0, dirent->d_name + namlen))
-               goto efault;
-       if (__put_user(d_type, (char __user *) dirent + reclen - 1))
+       if (dirent && signal_pending(current))
+               return -EINTR;
+
+       /*
+        * Note! This range-checks 'previous' (which may be NULL).
+        * The real range was checked in getdents
+        */
+       if (!user_access_begin(dirent, sizeof(*dirent)))
                goto efault;
+       if (dirent)
+               unsafe_put_user(offset, &dirent->d_off, efault_end);
+       dirent = buf->current_dir;
+       unsafe_put_user(d_ino, &dirent->d_ino, efault_end);
+       unsafe_put_user(reclen, &dirent->d_reclen, efault_end);
+       unsafe_put_user(d_type, (char __user *) dirent + reclen - 1, efault_end);
+       unsafe_copy_dirent_name(dirent->d_name, name, namlen, efault_end);
+       user_access_end();
+
        buf->previous = dirent;
        dirent = (void __user *)dirent + reclen;
        buf->current_dir = dirent;
        buf->count -= reclen;
        return 0;
+efault_end:
+       user_access_end();
 efault:
        buf->error = -EFAULT;
        return -EFAULT;
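
The hunk above (and the matching filldir64 hunk below) replaces per-field __put_user() calls with a single user_access_begin() region whose unsafe_put_user() stores branch to a label on a fault. A stripped-down sketch of that pattern, using a hypothetical put_pair() helper purely for illustration:

    /* Sketch only: one range check, then unconditional stores. */
    static int put_pair(int __user *dst, int a, int b)
    {
            if (!user_access_begin(dst, 2 * sizeof(int)))
                    return -EFAULT;
            unsafe_put_user(a, &dst[0], efault_end);
            unsafe_put_user(b, &dst[1], efault_end);
            user_access_end();
            return 0;

    efault_end:
            user_access_end();
            return -EFAULT;
    }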
@@ -259,34 +313,38 @@ static int filldir64(struct dir_context *ctx, const char *name, int namlen,
        int reclen = ALIGN(offsetof(struct linux_dirent64, d_name) + namlen + 1,
                sizeof(u64));
 
+       buf->error = verify_dirent_name(name, namlen);
+       if (unlikely(buf->error))
+               return buf->error;
        buf->error = -EINVAL;   /* only used if we fail.. */
        if (reclen > buf->count)
                return -EINVAL;
        dirent = buf->previous;
-       if (dirent) {
-               if (signal_pending(current))
-                       return -EINTR;
-               if (__put_user(offset, &dirent->d_off))
-                       goto efault;
-       }
-       dirent = buf->current_dir;
-       if (__put_user(ino, &dirent->d_ino))
-               goto efault;
-       if (__put_user(0, &dirent->d_off))
-               goto efault;
-       if (__put_user(reclen, &dirent->d_reclen))
-               goto efault;
-       if (__put_user(d_type, &dirent->d_type))
-               goto efault;
-       if (copy_to_user(dirent->d_name, name, namlen))
-               goto efault;
-       if (__put_user(0, dirent->d_name + namlen))
+       if (dirent && signal_pending(current))
+               return -EINTR;
+
+       /*
+        * Note! This range-checks 'previous' (which may be NULL).
+        * The real range was checked in getdents
+        */
+       if (!user_access_begin(dirent, sizeof(*dirent)))
                goto efault;
+       if (dirent)
+               unsafe_put_user(offset, &dirent->d_off, efault_end);
+       dirent = buf->current_dir;
+       unsafe_put_user(ino, &dirent->d_ino, efault_end);
+       unsafe_put_user(reclen, &dirent->d_reclen, efault_end);
+       unsafe_put_user(d_type, &dirent->d_type, efault_end);
+       unsafe_copy_dirent_name(dirent->d_name, name, namlen, efault_end);
+       user_access_end();
+
        buf->previous = dirent;
        dirent = (void __user *)dirent + reclen;
        buf->current_dir = dirent;
        buf->count -= reclen;
        return 0;
+efault_end:
+       user_access_end();
 efault:
        buf->error = -EFAULT;
        return -EFAULT;
index eea7af6f2f229e85f71e3ff3ac5d541e88956856..2616424012ea7bf4bc7f87a935466c74bb267397 100644 (file)
@@ -318,19 +318,10 @@ COMPAT_SYSCALL_DEFINE2(fstatfs, unsigned int, fd, struct compat_statfs __user *,
 static int put_compat_statfs64(struct compat_statfs64 __user *ubuf, struct kstatfs *kbuf)
 {
        struct compat_statfs64 buf;
-       if (sizeof(ubuf->f_bsize) == 4) {
-               if ((kbuf->f_type | kbuf->f_bsize | kbuf->f_namelen |
-                    kbuf->f_frsize | kbuf->f_flags) & 0xffffffff00000000ULL)
-                       return -EOVERFLOW;
-               /* f_files and f_ffree may be -1; it's okay
-                * to stuff that into 32 bits */
-               if (kbuf->f_files != 0xffffffffffffffffULL
-                && (kbuf->f_files & 0xffffffff00000000ULL))
-                       return -EOVERFLOW;
-               if (kbuf->f_ffree != 0xffffffffffffffffULL
-                && (kbuf->f_ffree & 0xffffffff00000000ULL))
-                       return -EOVERFLOW;
-       }
+
+       if ((kbuf->f_bsize | kbuf->f_frsize) & 0xffffffff00000000ULL)
+               return -EOVERFLOW;
+
        memset(&buf, 0, sizeof(struct compat_statfs64));
        buf.f_type = kbuf->f_type;
        buf.f_bsize = kbuf->f_bsize;
index f627b7c53d2b7fefda7546daac7efac40f1a765f..cfadab2cbf35fdfd67139e6c341840308adb93b9 100644 (file)
@@ -1300,6 +1300,7 @@ int get_tree_bdev(struct fs_context *fc,
        mutex_lock(&bdev->bd_fsfreeze_mutex);
        if (bdev->bd_fsfreeze_count > 0) {
                mutex_unlock(&bdev->bd_fsfreeze_mutex);
+               blkdev_put(bdev, mode);
                warnf(fc, "%pg: Can't mount, blockdev is frozen", bdev);
                return -EBUSY;
        }
@@ -1308,8 +1309,10 @@ int get_tree_bdev(struct fs_context *fc,
        fc->sget_key = bdev;
        s = sget_fc(fc, test_bdev_super_fc, set_bdev_super_fc);
        mutex_unlock(&bdev->bd_fsfreeze_mutex);
-       if (IS_ERR(s))
+       if (IS_ERR(s)) {
+               blkdev_put(bdev, mode);
                return PTR_ERR(s);
+       }
 
        if (s->s_root) {
                /* Don't summarily change the RO/RW state. */
index 9fc14e38927f9d5adc32f41bb67c92074f26cd6f..0caa151cae4ee101d13cd218b695b98787b730ad 100644 (file)
 #include <linux/namei.h>
 #include <linux/tracefs.h>
 #include <linux/fsnotify.h>
+#include <linux/security.h>
 #include <linux/seq_file.h>
 #include <linux/parser.h>
 #include <linux/magic.h>
 #include <linux/slab.h>
-#include <linux/security.h>
 
 #define TRACEFS_DEFAULT_MODE   0700
 
@@ -28,25 +28,6 @@ static struct vfsmount *tracefs_mount;
 static int tracefs_mount_count;
 static bool tracefs_registered;
 
-static int default_open_file(struct inode *inode, struct file *filp)
-{
-       struct dentry *dentry = filp->f_path.dentry;
-       struct file_operations *real_fops;
-       int ret;
-
-       if (!dentry)
-               return -EINVAL;
-
-       ret = security_locked_down(LOCKDOWN_TRACEFS);
-       if (ret)
-               return ret;
-
-       real_fops = dentry->d_fsdata;
-       if (!real_fops->open)
-               return 0;
-       return real_fops->open(inode, filp);
-}
-
 static ssize_t default_read_file(struct file *file, char __user *buf,
                                 size_t count, loff_t *ppos)
 {
@@ -241,12 +222,6 @@ static int tracefs_apply_options(struct super_block *sb)
        return 0;
 }
 
-static void tracefs_destroy_inode(struct inode *inode)
-{
-       if (S_ISREG(inode->i_mode))
-               kfree(inode->i_fop);
-}
-
 static int tracefs_remount(struct super_block *sb, int *flags, char *data)
 {
        int err;
@@ -283,7 +258,6 @@ static int tracefs_show_options(struct seq_file *m, struct dentry *root)
 static const struct super_operations tracefs_super_operations = {
        .statfs         = simple_statfs,
        .remount_fs     = tracefs_remount,
-       .destroy_inode  = tracefs_destroy_inode,
        .show_options   = tracefs_show_options,
 };
 
@@ -414,10 +388,12 @@ struct dentry *tracefs_create_file(const char *name, umode_t mode,
                                   struct dentry *parent, void *data,
                                   const struct file_operations *fops)
 {
-       struct file_operations *proxy_fops;
        struct dentry *dentry;
        struct inode *inode;
 
+       if (security_locked_down(LOCKDOWN_TRACEFS))
+               return NULL;
+
        if (!(mode & S_IFMT))
                mode |= S_IFREG;
        BUG_ON(!S_ISREG(mode));
@@ -430,20 +406,8 @@ struct dentry *tracefs_create_file(const char *name, umode_t mode,
        if (unlikely(!inode))
                return failed_creating(dentry);
 
-       proxy_fops = kzalloc(sizeof(struct file_operations), GFP_KERNEL);
-       if (unlikely(!proxy_fops)) {
-               iput(inode);
-               return failed_creating(dentry);
-       }
-
-       if (!fops)
-               fops = &tracefs_file_operations;
-
-       dentry->d_fsdata = (void *)fops;
-       memcpy(proxy_fops, fops, sizeof(*proxy_fops));
-       proxy_fops->open = default_open_file;
        inode->i_mode = mode;
-       inode->i_fop = proxy_fops;
+       inode->i_fop = fops ? fops : &tracefs_file_operations;
        inode->i_private = data;
        d_instantiate(dentry, inode);
        fsnotify_create(dentry->d_parent->d_inode, dentry);
index 5de296b34ab1f618ea489225cbcd054bfd9aa7f9..14fbdf22b7e7724fae102f6ef259707d40e96d7c 100644 (file)
@@ -28,12 +28,11 @@ xfs_get_aghdr_buf(
        struct xfs_mount        *mp,
        xfs_daddr_t             blkno,
        size_t                  numblks,
-       int                     flags,
        const struct xfs_buf_ops *ops)
 {
        struct xfs_buf          *bp;
 
-       bp = xfs_buf_get_uncached(mp->m_ddev_targp, numblks, flags);
+       bp = xfs_buf_get_uncached(mp->m_ddev_targp, numblks, 0);
        if (!bp)
                return NULL;
 
@@ -345,7 +344,7 @@ xfs_ag_init_hdr(
 {
        struct xfs_buf          *bp;
 
-       bp = xfs_get_aghdr_buf(mp, id->daddr, id->numblks, 0, ops);
+       bp = xfs_get_aghdr_buf(mp, id->daddr, id->numblks, ops);
        if (!bp)
                return -ENOMEM;
 
index b9f019603d0b7f2fa5e5bcc6cc1a4cc0f06dbeb2..f0089e862216ca1071d51163c0f1da41b67bb56d 100644 (file)
@@ -826,32 +826,17 @@ xfs_attr_shortform_to_leaf(
        sf = (xfs_attr_shortform_t *)tmpbuffer;
 
        xfs_idata_realloc(dp, -size, XFS_ATTR_FORK);
-       xfs_bmap_local_to_extents_empty(dp, XFS_ATTR_FORK);
+       xfs_bmap_local_to_extents_empty(args->trans, dp, XFS_ATTR_FORK);
 
        bp = NULL;
        error = xfs_da_grow_inode(args, &blkno);
-       if (error) {
-               /*
-                * If we hit an IO error middle of the transaction inside
-                * grow_inode(), we may have inconsistent data. Bail out.
-                */
-               if (error == -EIO)
-                       goto out;
-               xfs_idata_realloc(dp, size, XFS_ATTR_FORK);     /* try to put */
-               memcpy(ifp->if_u1.if_data, tmpbuffer, size);    /* it back */
+       if (error)
                goto out;
-       }
 
        ASSERT(blkno == 0);
        error = xfs_attr3_leaf_create(args, blkno, &bp);
-       if (error) {
-               /* xfs_attr3_leaf_create may not have instantiated a block */
-               if (bp && (xfs_da_shrink_inode(args, 0, bp) != 0))
-                       goto out;
-               xfs_idata_realloc(dp, size, XFS_ATTR_FORK);     /* try to put */
-               memcpy(ifp->if_u1.if_data, tmpbuffer, size);    /* it back */
+       if (error)
                goto out;
-       }
 
        memset((char *)&nargs, 0, sizeof(nargs));
        nargs.dp = dp;
index 4edc25a2ba80703d25a05de8123da4b7aa6f6bc7..02469d59c7879d5a6e039d929df11e558ab186f3 100644 (file)
@@ -792,6 +792,7 @@ out_root_realloc:
  */
 void
 xfs_bmap_local_to_extents_empty(
+       struct xfs_trans        *tp,
        struct xfs_inode        *ip,
        int                     whichfork)
 {
@@ -808,6 +809,7 @@ xfs_bmap_local_to_extents_empty(
        ifp->if_u1.if_root = NULL;
        ifp->if_height = 0;
        XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
+       xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
 }
 
 
@@ -840,7 +842,7 @@ xfs_bmap_local_to_extents(
        ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL);
 
        if (!ifp->if_bytes) {
-               xfs_bmap_local_to_extents_empty(ip, whichfork);
+               xfs_bmap_local_to_extents_empty(tp, ip, whichfork);
                flags = XFS_ILOG_CORE;
                goto done;
        }
@@ -887,7 +889,7 @@ xfs_bmap_local_to_extents(
 
        /* account for the change in fork size */
        xfs_idata_realloc(ip, -ifp->if_bytes, whichfork);
-       xfs_bmap_local_to_extents_empty(ip, whichfork);
+       xfs_bmap_local_to_extents_empty(tp, ip, whichfork);
        flags |= XFS_ILOG_CORE;
 
        ifp->if_u1.if_root = NULL;
index 5bb446d8054243d21867cf54279560cd5b637436..e2798c6f3a5f350f655ec02ef6e439212d0b45d8 100644 (file)
@@ -182,7 +182,8 @@ void        xfs_trim_extent(struct xfs_bmbt_irec *irec, xfs_fileoff_t bno,
                xfs_filblks_t len);
 int    xfs_bmap_add_attrfork(struct xfs_inode *ip, int size, int rsvd);
 int    xfs_bmap_set_attrforkoff(struct xfs_inode *ip, int size, int *version);
-void   xfs_bmap_local_to_extents_empty(struct xfs_inode *ip, int whichfork);
+void   xfs_bmap_local_to_extents_empty(struct xfs_trans *tp,
+               struct xfs_inode *ip, int whichfork);
 void   __xfs_bmap_add_free(struct xfs_trans *tp, xfs_fsblock_t bno,
                xfs_filblks_t len, const struct xfs_owner_info *oinfo,
                bool skip_discard);
index 9595ced393dce600e67f4903d0a3c599caa23e85..49e4bc39e7bb2bec60599cc6a2d6ae2087324050 100644 (file)
@@ -1096,7 +1096,7 @@ xfs_dir2_sf_to_block(
        memcpy(sfp, oldsfp, ifp->if_bytes);
 
        xfs_idata_realloc(dp, -ifp->if_bytes, XFS_DATA_FORK);
-       xfs_bmap_local_to_extents_empty(dp, XFS_DATA_FORK);
+       xfs_bmap_local_to_extents_empty(tp, dp, XFS_DATA_FORK);
        dp->i_d.di_size = 0;
 
        /*
index 39dd2b90810626b5075e9c5242532390c53f0148..e9371a8e0e26ffd30f8e2892e549c9357eb0fedd 100644 (file)
@@ -366,11 +366,11 @@ struct xfs_bulkstat {
        uint64_t        bs_blocks;      /* number of blocks             */
        uint64_t        bs_xflags;      /* extended flags               */
 
-       uint64_t        bs_atime;       /* access time, seconds         */
-       uint64_t        bs_mtime;       /* modify time, seconds         */
+       int64_t         bs_atime;       /* access time, seconds         */
+       int64_t         bs_mtime;       /* modify time, seconds         */
 
-       uint64_t        bs_ctime;       /* inode change time, seconds   */
-       uint64_t        bs_btime;       /* creation time, seconds       */
+       int64_t         bs_ctime;       /* inode change time, seconds   */
+       int64_t         bs_btime;       /* creation time, seconds       */
 
        uint32_t        bs_gen;         /* generation count             */
        uint32_t        bs_uid;         /* user id                      */
index 93b3793bc5b31a91dc466dc0a904a71c549e8dd0..0cab11a5d39070aab46887cff8a4069a921f3eac 100644 (file)
@@ -341,7 +341,6 @@ xchk_refcountbt_rec(
        xfs_extlen_t            len;
        xfs_nlink_t             refcount;
        bool                    has_cowflag;
-       int                     error = 0;
 
        bno = be32_to_cpu(rec->refc.rc_startblock);
        len = be32_to_cpu(rec->refc.rc_blockcount);
@@ -366,7 +365,7 @@ xchk_refcountbt_rec(
 
        xchk_refcountbt_xref(bs->sc, bno, len, refcount);
 
-       return error;
+       return 0;
 }
 
 /* Make sure we have as many refc blocks as the rmap says. */
index 0910cb75b65d77b356d1cde4f38662d48a7d9367..4f443703065e3319db345d88d02f9701ba8a249c 100644 (file)
@@ -864,6 +864,7 @@ xfs_alloc_file_space(
        xfs_filblks_t           allocatesize_fsb;
        xfs_extlen_t            extsz, temp;
        xfs_fileoff_t           startoffset_fsb;
+       xfs_fileoff_t           endoffset_fsb;
        int                     nimaps;
        int                     quota_flag;
        int                     rt;
@@ -891,7 +892,8 @@ xfs_alloc_file_space(
        imapp = &imaps[0];
        nimaps = 1;
        startoffset_fsb = XFS_B_TO_FSBT(mp, offset);
-       allocatesize_fsb = XFS_B_TO_FSB(mp, count);
+       endoffset_fsb = XFS_B_TO_FSB(mp, offset + count);
+       allocatesize_fsb = endoffset_fsb - startoffset_fsb;
 
        /*
         * Allocate file space until done or until there is an error
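
The new allocatesize_fsb computation matters when the start offset is not block aligned. A hedged back-of-the-envelope sketch, assuming a made-up 4 KiB block size, shows the difference between rounding count alone and rounding the end offset:

    #include <stdio.h>

    /* Assumed 4 KiB blocks; FSBT truncates (rounds down), FSB rounds up. */
    #define BLKBITS 12
    #define B_TO_FSBT(b) ((b) >> BLKBITS)
    #define B_TO_FSB(b)  (((b) + (1LL << BLKBITS) - 1) >> BLKBITS)

    int main(void)
    {
            long long offset = 2048, count = 4096;  /* 4 KiB write at offset 2 KiB */
            long long start_fsb = B_TO_FSBT(offset);
            long long old_len = B_TO_FSB(count);                        /* 1 block */
            long long new_len = B_TO_FSB(offset + count) - start_fsb;   /* 2 blocks */

            printf("old allocatesize_fsb=%lld, new allocatesize_fsb=%lld\n",
                   old_len, new_len);
            return 0;
    }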
index 21c243622a79d238dc13578dd0c22c1386d46ee9..0abba171aa89b6c948fc795201d2eee9734d6e06 100644 (file)
@@ -345,6 +345,15 @@ xfs_buf_allocate_memory(
        unsigned short          page_count, i;
        xfs_off_t               start, end;
        int                     error;
+       xfs_km_flags_t          kmflag_mask = 0;
+
+       /*
+        * Ensure a zeroed buffer for non-read cases.
+        */
+       if (!(flags & XBF_READ)) {
+               kmflag_mask |= KM_ZERO;
+               gfp_mask |= __GFP_ZERO;
+       }
 
        /*
         * for buffers that are contained within a single page, just allocate
@@ -354,7 +363,8 @@ xfs_buf_allocate_memory(
        size = BBTOB(bp->b_length);
        if (size < PAGE_SIZE) {
                int align_mask = xfs_buftarg_dma_alignment(bp->b_target);
-               bp->b_addr = kmem_alloc_io(size, align_mask, KM_NOFS);
+               bp->b_addr = kmem_alloc_io(size, align_mask,
+                                          KM_NOFS | kmflag_mask);
                if (!bp->b_addr) {
                        /* low memory - use alloc_page loop instead */
                        goto use_alloc_page;
index a2beee9f74dabfad4eec3922a27cd8784135f2f5..641d07f30a2771731dede545dbeb219cef43f5a6 100644 (file)
@@ -1443,7 +1443,7 @@ xlog_alloc_log(
                prev_iclog = iclog;
 
                iclog->ic_data = kmem_alloc_io(log->l_iclog_size, align_mask,
-                                               KM_MAYFAIL);
+                                               KM_MAYFAIL | KM_ZERO);
                if (!iclog->ic_data)
                        goto out_free_iclog;
 #ifdef DEBUG
index 508319039dceb54a3d1c167b82fa1c587fef709a..c1a514ffff55507e2986568868b7df915f3b00f8 100644 (file)
@@ -127,7 +127,7 @@ xlog_alloc_buffer(
        if (nbblks > 1 && log->l_sectBBsize > 1)
                nbblks += log->l_sectBBsize;
        nbblks = round_up(nbblks, log->l_sectBBsize);
-       return kmem_alloc_io(BBTOB(nbblks), align_mask, KM_MAYFAIL);
+       return kmem_alloc_io(BBTOB(nbblks), align_mask, KM_MAYFAIL | KM_ZERO);
 }
 
 /*
index f936033cb9e6d57e0b965b5cb742ac868c10c463..47805172e73d80e4ee3c771a462b0c1840847a10 100644 (file)
@@ -232,8 +232,8 @@ struct acpi_processor {
        struct acpi_processor_limit limit;
        struct thermal_cooling_device *cdev;
        struct device *dev; /* Processor device. */
-       struct dev_pm_qos_request perflib_req;
-       struct dev_pm_qos_request thermal_req;
+       struct freq_qos_request perflib_req;
+       struct freq_qos_request thermal_req;
 };
 
 struct acpi_processor_errata {
@@ -302,8 +302,8 @@ static inline void acpi_processor_ffh_cstate_enter(struct acpi_processor_cx
 #ifdef CONFIG_CPU_FREQ
 extern bool acpi_processor_cpufreq_init;
 void acpi_processor_ignore_ppc_init(void);
-void acpi_processor_ppc_init(int cpu);
-void acpi_processor_ppc_exit(int cpu);
+void acpi_processor_ppc_init(struct cpufreq_policy *policy);
+void acpi_processor_ppc_exit(struct cpufreq_policy *policy);
 void acpi_processor_ppc_has_changed(struct acpi_processor *pr, int event_flag);
 extern int acpi_processor_get_bios_limit(int cpu, unsigned int *limit);
 #else
@@ -311,11 +311,11 @@ static inline void acpi_processor_ignore_ppc_init(void)
 {
        return;
 }
-static inline void acpi_processor_ppc_init(int cpu)
+static inline void acpi_processor_ppc_init(struct cpufreq_policy *policy)
 {
        return;
 }
-static inline void acpi_processor_ppc_exit(int cpu)
+static inline void acpi_processor_ppc_exit(struct cpufreq_policy *policy)
 {
        return;
 }
@@ -431,14 +431,14 @@ static inline int acpi_processor_hotplug(struct acpi_processor *pr)
 int acpi_processor_get_limit_info(struct acpi_processor *pr);
 extern const struct thermal_cooling_device_ops processor_cooling_ops;
 #if defined(CONFIG_ACPI_CPU_FREQ_PSS) & defined(CONFIG_CPU_FREQ)
-void acpi_thermal_cpufreq_init(int cpu);
-void acpi_thermal_cpufreq_exit(int cpu);
+void acpi_thermal_cpufreq_init(struct cpufreq_policy *policy);
+void acpi_thermal_cpufreq_exit(struct cpufreq_policy *policy);
 #else
-static inline void acpi_thermal_cpufreq_init(int cpu)
+static inline void acpi_thermal_cpufreq_init(struct cpufreq_policy *policy)
 {
        return;
 }
-static inline void acpi_thermal_cpufreq_exit(int cpu)
+static inline void acpi_thermal_cpufreq_exit(struct cpufreq_policy *policy)
 {
        return;
 }
index 90528f12bdfa6a3a28ed1e00e5ab7e68b104535c..29fc933df3bf0eaf5dae01b458854d9cb410eef9 100644 (file)
@@ -326,10 +326,11 @@ static inline int bitmap_equal(const unsigned long *src1,
 }
 
 /**
- * bitmap_or_equal - Check whether the or of two bitnaps is equal to a third
+ * bitmap_or_equal - Check whether the or of two bitmaps is equal to a third
  * @src1:      Pointer to bitmap 1
  * @src2:      Pointer to bitmap 2 will be or'ed with bitmap 1
  * @src3:      Pointer to bitmap 3. Compare to the result of *@src1 | *@src2
+ * @nbits:     number of bits in each of these bitmaps
  *
  * Returns: True if (*@src1 | *@src2) == *@src3, false otherwise
  */
index cf074bce3eb322c2ced7e3e05641f4c87d080ee0..c94a9ff9f082e3301809eb5f47c0493bdbaddc7e 100644 (file)
@@ -4,6 +4,13 @@
 #include <asm/types.h>
 #include <linux/bits.h>
 
+/* Set bits in the first 'n' bytes when loaded from memory */
+#ifdef __LITTLE_ENDIAN
+#  define aligned_byte_mask(n) ((1UL << 8*(n))-1)
+#else
+#  define aligned_byte_mask(n) (~0xffUL << (BITS_PER_LONG - 8 - 8*(n)))
+#endif
+
 #define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE)
 #define BITS_TO_LONGS(nr)      DIV_ROUND_UP(nr, BITS_PER_TYPE(long))
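
As a quick illustration of the aligned_byte_mask() helper added above, a userspace sketch assuming a 64-bit little-endian build (not code from the patch):

    #include <stdio.h>

    /* Little-endian variant from the hunk above. */
    #define aligned_byte_mask(n) ((1UL << 8*(n))-1)

    int main(void)
    {
            /* Prints 0xff, 0xffff, 0xffffff, 0xffffffff for n = 1..4. */
            for (int n = 1; n <= 4; n++)
                    printf("aligned_byte_mask(%d) = %#lx\n", n, aligned_byte_mask(n));
            return 0;
    }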
 
index 6b318efd8a7428042771b0b83f4e5c5846d46a9d..cdf016596659a3f06d27dd58fb79916ade5a378a 100644 (file)
@@ -40,6 +40,7 @@
 # define __GCC4_has_attribute___noclone__             1
 # define __GCC4_has_attribute___nonstring__           0
 # define __GCC4_has_attribute___no_sanitize_address__ (__GNUC_MINOR__ >= 8)
+# define __GCC4_has_attribute___fallthrough__         0
 #endif
 
 /*
 # define __noclone
 #endif
 
+/*
+ * Add the pseudo keyword 'fallthrough' so case statement blocks
+ * must end with any of these keywords:
+ *   break;
+ *   fallthrough;
+ *   goto <label>;
+ *   return [expression];
+ *
+ *  gcc: https://gcc.gnu.org/onlinedocs/gcc/Statement-Attributes.html#Statement-Attributes
+ */
+#if __has_attribute(__fallthrough__)
+# define fallthrough                    __attribute__((__fallthrough__))
+#else
+# define fallthrough                    do {} while (0)  /* fallthrough */
+#endif
+
 /*
  * Note the missing underscores.
  *
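
A short example of how the new fallthrough pseudo-keyword is intended to be used in a switch statement; the function below is made up for illustration and is not part of the patch:

    static int frobnicate(int phase)
    {
            int steps = 0;

            switch (phase) {
            case 0:
                    steps++;
                    fallthrough;    /* phase 0 also performs phase 1's work */
            case 1:
                    steps++;
                    break;
            default:
                    break;
            }
            return steps;
    }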
index c57e88e85c41c6389774e49b6e37057aa2ba3b03..92d5fdc8154ee9f1e2ec38b8a78b9b486240bd24 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/completion.h>
 #include <linux/kobject.h>
 #include <linux/notifier.h>
+#include <linux/pm_qos.h>
 #include <linux/spinlock.h>
 #include <linux/sysfs.h>
 
@@ -76,8 +77,10 @@ struct cpufreq_policy {
        struct work_struct      update; /* if update_policy() needs to be
                                         * called, but you're in IRQ context */
 
-       struct dev_pm_qos_request *min_freq_req;
-       struct dev_pm_qos_request *max_freq_req;
+       struct freq_constraints constraints;
+       struct freq_qos_request *min_freq_req;
+       struct freq_qos_request *max_freq_req;
+
        struct cpufreq_frequency_table  *freq_table;
        enum cpufreq_table_sorting freq_table_sorted;
 
index 79435cfc20ebbf0c97f7c07528a7b4d5bd6008f4..897e799dbcb9fb3569da82c89b1fec1dc6ae3fad 100644 (file)
@@ -31,6 +31,8 @@
 #define SJA1105_META_SMAC                      0x222222222222ull
 #define SJA1105_META_DMAC                      0x0180C200000Eull
 
+#define SJA1105_HWTS_RX_EN                     0
+
 /* Global tagger data: each struct sja1105_port has a reference to
  * the structure defined in struct sja1105_private.
  */
@@ -42,7 +44,7 @@ struct sja1105_tagger_data {
         * from taggers running on multiple ports on SMP systems
         */
        spinlock_t meta_lock;
-       bool hwts_rx_en;
+       unsigned long state;
 };
 
 struct sja1105_skb_cb {
index 6c809440f31978228acb3f29762ccb6901fd8341..4cf02ecd67de7f61c84ec7c8db22d48cd900fb85 100644 (file)
@@ -204,6 +204,12 @@ static inline int ddebug_dyndbg_module_param_cb(char *param, char *val,
        do { if (0) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); } while (0)
 #define dynamic_dev_dbg(dev, fmt, ...)                                 \
        do { if (0) dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__); } while (0)
+#define dynamic_hex_dump(prefix_str, prefix_type, rowsize,             \
+                        groupsize, buf, len, ascii)                    \
+       do { if (0)                                                     \
+               print_hex_dump(KERN_DEBUG, prefix_str, prefix_type,     \
+                               rowsize, groupsize, buf, len, ascii);   \
+       } while (0)
 #endif
 
 #endif
index bd3837022307ac34271054b4024f8ec6bab09e5a..d87acf62958e207f941080c251048c9215df834e 100644 (file)
@@ -1579,9 +1579,22 @@ char *efi_convert_cmdline(efi_system_table_t *sys_table_arg,
 efi_status_t efi_get_memory_map(efi_system_table_t *sys_table_arg,
                                struct efi_boot_memmap *map);
 
+efi_status_t efi_low_alloc_above(efi_system_table_t *sys_table_arg,
+                                unsigned long size, unsigned long align,
+                                unsigned long *addr, unsigned long min);
+
+static inline
 efi_status_t efi_low_alloc(efi_system_table_t *sys_table_arg,
                           unsigned long size, unsigned long align,
-                          unsigned long *addr);
+                          unsigned long *addr)
+{
+       /*
+        * Don't allocate at 0x0. It will confuse code that
+        * checks pointers against NULL. Skip the first 8
+        * bytes so we start at a nice even number.
+        */
+       return efi_low_alloc_above(sys_table_arg, size, align, addr, 0x8);
+}
 
 efi_status_t efi_high_alloc(efi_system_table_t *sys_table_arg,
                            unsigned long size, unsigned long align,
@@ -1592,7 +1605,8 @@ efi_status_t efi_relocate_kernel(efi_system_table_t *sys_table_arg,
                                 unsigned long image_size,
                                 unsigned long alloc_size,
                                 unsigned long preferred_addr,
-                                unsigned long alignment);
+                                unsigned long alignment,
+                                unsigned long min_addr);
 
 efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg,
                                  efi_loaded_image_t *image,
index 95f55b7f83a0a28a67898e441c959ae05b32e24f..941d075f03d684d20b3c1c27c3a6304324e185f5 100644 (file)
@@ -18,8 +18,6 @@ extern struct module __this_module;
 #define THIS_MODULE ((struct module *)0)
 #endif
 
-#define NS_SEPARATOR "."
-
 #ifdef CONFIG_MODVERSIONS
 /* Mark the CRC weak since genksyms apparently decides not to
  * generate a checksums for some symbols */
@@ -48,14 +46,14 @@ extern struct module __this_module;
  * absolute relocations that require runtime processing on relocatable
  * kernels.
  */
-#define __KSYMTAB_ENTRY_NS(sym, sec, ns)                               \
+#define __KSYMTAB_ENTRY_NS(sym, sec)                                   \
        __ADDRESSABLE(sym)                                              \
        asm("   .section \"___ksymtab" sec "+" #sym "\", \"a\"  \n"     \
            "   .balign 4                                       \n"     \
-           "__ksymtab_" #sym NS_SEPARATOR #ns ":               \n"     \
+           "__ksymtab_" #sym ":                                \n"     \
            "   .long   " #sym "- .                             \n"     \
            "   .long   __kstrtab_" #sym "- .                   \n"     \
-           "   .long   __kstrtab_ns_" #sym "- .                \n"     \
+           "   .long   __kstrtabns_" #sym "- .                 \n"     \
            "   .previous                                       \n")
 
 #define __KSYMTAB_ENTRY(sym, sec)                                      \
@@ -74,16 +72,14 @@ struct kernel_symbol {
        int namespace_offset;
 };
 #else
-#define __KSYMTAB_ENTRY_NS(sym, sec, ns)                               \
-       static const struct kernel_symbol __ksymtab_##sym##__##ns       \
-       asm("__ksymtab_" #sym NS_SEPARATOR #ns)                         \
+#define __KSYMTAB_ENTRY_NS(sym, sec)                                   \
+       static const struct kernel_symbol __ksymtab_##sym               \
        __attribute__((section("___ksymtab" sec "+" #sym), used))       \
        __aligned(sizeof(void *))                                       \
-       = { (unsigned long)&sym, __kstrtab_##sym, __kstrtab_ns_##sym }
+       = { (unsigned long)&sym, __kstrtab_##sym, __kstrtabns_##sym }
 
 #define __KSYMTAB_ENTRY(sym, sec)                                      \
        static const struct kernel_symbol __ksymtab_##sym               \
-       asm("__ksymtab_" #sym)                                          \
        __attribute__((section("___ksymtab" sec "+" #sym), used))       \
        __aligned(sizeof(void *))                                       \
        = { (unsigned long)&sym, __kstrtab_##sym, NULL }
@@ -112,10 +108,10 @@ struct kernel_symbol {
 /* For every exported symbol, place a struct in the __ksymtab section */
 #define ___EXPORT_SYMBOL_NS(sym, sec, ns)                              \
        ___export_symbol_common(sym, sec);                              \
-       static const char __kstrtab_ns_##sym[]                          \
+       static const char __kstrtabns_##sym[]                           \
        __attribute__((section("__ksymtab_strings"), used, aligned(1))) \
        = #ns;                                                          \
-       __KSYMTAB_ENTRY_NS(sym, sec, ns)
+       __KSYMTAB_ENTRY_NS(sym, sec)
 
 #define ___EXPORT_SYMBOL(sym, sec)                                     \
        ___export_symbol_common(sym, sec);                              \
index 2ce57645f3cd4a00d8d385b7e1a05296191df476..0367a75f873b6b28510155591f68077732e4638d 100644 (file)
@@ -1099,7 +1099,6 @@ static inline void bpf_get_prog_name(const struct bpf_prog *prog, char *sym)
 
 #endif /* CONFIG_BPF_JIT */
 
-void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp);
 void bpf_prog_kallsyms_del_all(struct bpf_prog *fp);
 
 #define BPF_ANC                BIT(15)
index fb07b503dc453ddfe16c5f0f959d46be01ad55ba..61f2f6ff94673b0a74d2d7705d87373bfba74fe5 100644 (file)
@@ -325,6 +325,29 @@ static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags)
        return !!(gfp_flags & __GFP_DIRECT_RECLAIM);
 }
 
+/**
+ * gfpflags_normal_context - is gfp_flags a normal sleepable context?
+ * @gfp_flags: gfp_flags to test
+ *
+ * Test whether @gfp_flags indicates that the allocation is from the
+ * %current context and allowed to sleep.
+ *
+ * An allocation being allowed to block doesn't mean it owns the %current
+ * context.  When the direct reclaim path tries to allocate memory, the
+ * allocation context is nested inside whatever %current was doing at the
+ * time of the original allocation.  The nested allocation may be allowed
+ * to block but modifying anything %current owns can corrupt the outer
+ * context's expectations.
+ *
+ * A %true result from this function indicates that the allocation context
+ * can sleep and use anything that's associated with %current.
+ */
+static inline bool gfpflags_normal_context(const gfp_t gfp_flags)
+{
+       return (gfp_flags & (__GFP_DIRECT_RECLAIM | __GFP_MEMALLOC)) ==
+               __GFP_DIRECT_RECLAIM;
+}
+
 #ifdef CONFIG_HIGHMEM
 #define OPT_ZONE_HIGHMEM ZONE_HIGHMEM
 #else
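
A hedged sketch of the kind of caller gfpflags_normal_context() is aimed at; apart from the helper itself and kmem_cache_alloc(), everything here (the function and the per-task counter) is hypothetical and only illustrates the idea:

    static void *example_alloc(struct kmem_cache *cache, gfp_t gfp)
    {
            void *obj = kmem_cache_alloc(cache, gfp);

            /*
             * Only charge the allocation to 'current' when the gfp mask says we
             * really are in current's own sleepable context; an allocation nested
             * inside direct reclaim (or one using __GFP_MEMALLOC) owns nothing of
             * current's and must not touch its state.
             */
            if (obj && gfpflags_normal_context(gfp))
                    current->example_nr_objs++;     /* hypothetical field */
            return obj;
    }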
index f8245d67f07089774bca10017df85fb82ee39835..5dd9c982e2cbee6d10aa42fbcd888d702e6751cc 100644 (file)
@@ -201,6 +201,14 @@ struct gpio_irq_chip {
         */
        bool threaded;
 
+       /**
+        * @init_hw: optional routine to initialize hardware before
+        * an IRQ chip is added. This is useful when a particular
+        * driver wants to clear IRQ-related registers in order to
+        * avoid undesired events.
+        */
+       int (*init_hw)(struct gpio_chip *chip);
+
        /**
         * @init_valid_mask: optional routine to initialize @valid_mask, to be
         * used if not all GPIO lines are valid interrupts. Sometimes some
index 04c36b7a61ddcb15458c7fdc4612652dec33a4c4..72579168189d47c0f3fee924c748ae7ad5831350 100644 (file)
@@ -235,7 +235,7 @@ enum hwmon_power_attributes {
 #define HWMON_P_LABEL                  BIT(hwmon_power_label)
 #define HWMON_P_ALARM                  BIT(hwmon_power_alarm)
 #define HWMON_P_CAP_ALARM              BIT(hwmon_power_cap_alarm)
-#define HWMON_P_MIN_ALARM              BIT(hwmon_power_max_alarm)
+#define HWMON_P_MIN_ALARM              BIT(hwmon_power_min_alarm)
 #define HWMON_P_MAX_ALARM              BIT(hwmon_power_max_alarm)
 #define HWMON_P_LCRIT_ALARM            BIT(hwmon_power_lcrit_alarm)
 #define HWMON_P_CRIT_ALARM             BIT(hwmon_power_crit_alarm)
index 2e55e4cdbd8aaf2fb6f1e9220ba2a8f3d6285de5..a367ead4bf4bb16e0c41544335031f85aa2d61b3 100644 (file)
@@ -29,7 +29,6 @@ struct macvlan_dev {
        netdev_features_t       set_features;
        enum macvlan_mode       mode;
        u16                     flags;
-       int                     nest_level;
        unsigned int            macaddr_count;
 #ifdef CONFIG_NET_POLL_CONTROLLER
        struct netpoll          *netpoll;
index 06faa066496f92fd381ae093568d640d3dfd1a1f..ec7e4bd07f825539c21c74e87bd2f675a72fac9a 100644 (file)
@@ -223,6 +223,7 @@ struct team {
                atomic_t count_pending;
                struct delayed_work dw;
        } mcast_rejoin;
+       struct lock_class_key team_lock_key;
        long mode_priv[TEAM_MODE_PRIV_LONGS];
 };
 
index 244278d5c222d250816256ac455d868f415ecc99..b05e855f1ddd4f2ea1cb0282ad754a7a50f826fe 100644 (file)
@@ -182,7 +182,6 @@ struct vlan_dev_priv {
 #ifdef CONFIG_NET_POLL_CONTROLLER
        struct netpoll                          *netpoll;
 #endif
-       unsigned int                            nest_level;
 };
 
 static inline struct vlan_dev_priv *vlan_dev_priv(const struct net_device *dev)
@@ -221,11 +220,6 @@ extern void vlan_vids_del_by_dev(struct net_device *dev,
 
 extern bool vlan_uses_dev(const struct net_device *dev);
 
-static inline int vlan_get_encap_level(struct net_device *dev)
-{
-       BUG_ON(!is_vlan_dev(dev));
-       return vlan_dev_priv(dev)->nest_level;
-}
 #else
 static inline struct net_device *
 __vlan_find_dev_deep_rcu(struct net_device *real_dev,
@@ -295,11 +289,6 @@ static inline bool vlan_uses_dev(const struct net_device *dev)
 {
        return false;
 }
-static inline int vlan_get_encap_level(struct net_device *dev)
-{
-       BUG();
-       return 0;
-}
 #endif
 
 /**
index fcb46b3374c60967993cf6123172c35a7cac4277..719fc3e15ea4e0181f314718951f9ff4fc2505c6 100644 (file)
@@ -1090,6 +1090,7 @@ enum kvm_stat_kind {
 
 struct kvm_stat_data {
        int offset;
+       int mode;
        struct kvm *kvm;
 };
 
@@ -1097,6 +1098,7 @@ struct kvm_stats_debugfs_item {
        const char *name;
        int offset;
        enum kvm_stat_kind kind;
+       int mode;
 };
 extern struct kvm_stats_debugfs_item debugfs_entries[];
 extern struct dentry *kvm_debugfs_dir;
index b8df7119332980c4be0493ed6b78753b5a498b11..efb309dba914a90f53e6c811e2543cbffdc4f739 100644 (file)
@@ -247,7 +247,7 @@ extern void led_set_brightness(struct led_classdev *led_cdev,
 /**
  * led_set_brightness_sync - set LED brightness synchronously
  * @led_cdev: the LED to set
- * @brightness: the brightness to set it to
+ * @value: the brightness to set it to
  *
  * Set an LED's brightness immediately. This function will block
  * the caller for the time required for accessing device registers,
@@ -301,8 +301,7 @@ extern void led_sysfs_enable(struct led_classdev *led_cdev);
 /**
  * led_compose_name - compose LED class device name
  * @dev: LED controller device object
- * @child: child fwnode_handle describing a LED or a group of synchronized LEDs;
- *        it must be provided only for fwnode based LEDs
+ * @init_data: the LED class device initialization data
  * @led_classdev_name: composed LED class device name
  *
  * Create LED class device name basing on the provided init_data argument.
index 9b60863429ccff3c5d70e17fe5621ec4bf0a93bc..ae703ea3ef4841e293990b250252c2f562395065 100644 (file)
@@ -356,6 +356,19 @@ static inline bool mem_cgroup_disabled(void)
        return !cgroup_subsys_enabled(memory_cgrp_subsys);
 }
 
+static inline unsigned long mem_cgroup_protection(struct mem_cgroup *memcg,
+                                                 bool in_low_reclaim)
+{
+       if (mem_cgroup_disabled())
+               return 0;
+
+       if (in_low_reclaim)
+               return READ_ONCE(memcg->memory.emin);
+
+       return max(READ_ONCE(memcg->memory.emin),
+                  READ_ONCE(memcg->memory.elow));
+}
+
 enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root,
                                                struct mem_cgroup *memcg);
 
@@ -537,6 +550,8 @@ void mem_cgroup_handle_over_high(void);
 
 unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg);
 
+unsigned long mem_cgroup_size(struct mem_cgroup *memcg);
+
 void mem_cgroup_print_oom_context(struct mem_cgroup *memcg,
                                struct task_struct *p);
 
@@ -829,6 +844,12 @@ static inline void memcg_memory_event_mm(struct mm_struct *mm,
 {
 }
 
+static inline unsigned long mem_cgroup_protection(struct mem_cgroup *memcg,
+                                                 bool in_low_reclaim)
+{
+       return 0;
+}
+
 static inline enum mem_cgroup_protection mem_cgroup_protected(
        struct mem_cgroup *root, struct mem_cgroup *memcg)
 {
@@ -968,6 +989,11 @@ static inline unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
        return 0;
 }
 
+static inline unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
+{
+       return 0;
+}
+
 static inline void
 mem_cgroup_print_oom_context(struct mem_cgroup *memcg, struct task_struct *p)
 {
@@ -1264,6 +1290,9 @@ void mem_cgroup_track_foreign_dirty_slowpath(struct page *page,
 static inline void mem_cgroup_track_foreign_dirty(struct page *page,
                                                  struct bdi_writeback *wb)
 {
+       if (mem_cgroup_disabled())
+               return;
+
        if (unlikely(&page->mem_cgroup->css != wb->memcg_css))
                mem_cgroup_track_foreign_dirty_slowpath(page, wb);
 }
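
The new mem_cgroup_protection() and mem_cgroup_size() helpers give reclaim code one place to read a cgroup's effective protection and current usage. A hedged sketch of how a caller might clamp a scan target with them; the clamping policy below is illustrative only, not the actual vmscan logic:

#include <linux/kernel.h>
#include <linux/memcontrol.h>

/* Illustrative only: limit a reclaim scan so the protected part is skipped. */
static unsigned long example_clamp_scan(struct mem_cgroup *memcg,
					unsigned long scan,
					bool in_low_reclaim)
{
	unsigned long protection = mem_cgroup_protection(memcg, in_low_reclaim);
	unsigned long usage = mem_cgroup_size(memcg);

	if (usage <= protection)
		return 0;	/* fully protected: nothing to scan */

	return min(scan, usage - protection);
}
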
index ad24554f11f969d87762d460341622235f9a79ae..75f880c25bb86cacd9370a61bd2db66c9427296f 100644 (file)
@@ -31,7 +31,7 @@
 #define PHY_ID_KSZ886X         0x00221430
 #define PHY_ID_KSZ8863         0x00221435
 
-#define PHY_ID_KSZ8795         0x00221550
+#define PHY_ID_KSZ87XX         0x00221550
 
 #define        PHY_ID_KSZ9477          0x00221631
 
index 5cd824c1c0caa8c9adda4a8e6d640f43605cd4fb..4ce8901a1af656f4c16b70976ae7f543cb7b5b20 100644 (file)
@@ -455,6 +455,15 @@ static inline void mii_lpa_mod_linkmode_lpa_t(unsigned long *lp_advertising,
                         lp_advertising, lpa & LPA_LPACK);
 }
 
+static inline void mii_ctrl1000_mod_linkmode_adv_t(unsigned long *advertising,
+                                                  u32 ctrl1000)
+{
+       linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, advertising,
+                        ctrl1000 & ADVERTISE_1000HALF);
+       linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, advertising,
+                        ctrl1000 & ADVERTISE_1000FULL);
+}
+
 /**
  * linkmode_adv_to_lcl_adv_t
  * @advertising:pointer to linkmode advertising
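
The new mii_ctrl1000_mod_linkmode_adv_t() helper mirrors the existing lpa/adv converters: it folds the gigabit bits of a MII_CTRL1000 value into a linkmode bitmap. A hedged sketch of how a PHY driver might use it when refreshing its advertisement (the register-read flow is an assumption, not taken from a specific driver):

#include <linux/mii.h>
#include <linux/phy.h>

/* Illustrative: sync the gigabit advertisement bits into phydev->advertising. */
static int example_sync_gig_advertising(struct phy_device *phydev)
{
	int ctrl1000 = phy_read(phydev, MII_CTRL1000);

	if (ctrl1000 < 0)
		return ctrl1000;

	mii_ctrl1000_mod_linkmode_adv_t(phydev->advertising, ctrl1000);
	return 0;
}
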
index 138c50d5a3533d7ad0b2c8757e9fff08c9b1b388..0836fe232f975718532f89b5b6e23bf79f2272dc 100644 (file)
@@ -1545,9 +1545,8 @@ struct mlx5_ifc_extended_dest_format_bits {
 };
 
 union mlx5_ifc_dest_format_struct_flow_counter_list_auto_bits {
-       struct mlx5_ifc_dest_format_struct_bits dest_format_struct;
+       struct mlx5_ifc_extended_dest_format_bits extended_dest_format;
        struct mlx5_ifc_flow_counter_list_bits flow_counter_list;
-       u8         reserved_at_0[0x40];
 };
 
 struct mlx5_ifc_fte_match_param_bits {
index cc292273e6ba0c0ac56d38d16aa1f87d65730393..a2adf95b3f9c5e1e026a3d1db1cb7136c992ca83 100644 (file)
@@ -695,11 +695,6 @@ static inline void *kvcalloc(size_t n, size_t size, gfp_t flags)
 
 extern void kvfree(const void *addr);
 
-static inline atomic_t *compound_mapcount_ptr(struct page *page)
-{
-       return &page[1].compound_mapcount;
-}
-
 static inline int compound_mapcount(struct page *page)
 {
        VM_BUG_ON_PAGE(!PageCompound(page), page);
index 2222fa795284183344d603c5286fcfd8c40dffa3..270aa8fd2800b4234297fa7c27577705f8c24395 100644 (file)
@@ -221,6 +221,11 @@ struct page {
 #endif
 } _struct_page_alignment;
 
+static inline atomic_t *compound_mapcount_ptr(struct page *page)
+{
+       return &page[1].compound_mapcount;
+}
+
 /*
  * Used for sizing the vmemmap region on some architectures
  */
index 9eda1c31d1f77956ac3fc382be25ac44906e83bc..c20f190b4c189615c4eebaa84da6b89c1077f223 100644 (file)
@@ -925,6 +925,7 @@ struct dev_ifalias {
 struct devlink;
 struct tlsdev_ops;
 
+
 /*
  * This structure defines the management hooks for network devices.
  * The following hooks can be defined; unless noted otherwise, they are
@@ -1421,7 +1422,6 @@ struct net_device_ops {
        void                    (*ndo_dfwd_del_station)(struct net_device *pdev,
                                                        void *priv);
 
-       int                     (*ndo_get_lock_subclass)(struct net_device *dev);
        int                     (*ndo_set_tx_maxrate)(struct net_device *dev,
                                                      int queue_index,
                                                      u32 maxrate);
@@ -1649,6 +1649,8 @@ enum netdev_priv_flags {
  *     @perm_addr:             Permanent hw address
  *     @addr_assign_type:      Hw address assignment type
  *     @addr_len:              Hardware address length
+ *     @upper_level:           Maximum depth level of upper devices.
+ *     @lower_level:           Maximum depth level of lower devices.
  *     @neigh_priv_len:        Used in neigh_alloc()
  *     @dev_id:                Used to differentiate devices that share
  *                             the same link layer address
@@ -1758,9 +1760,13 @@ enum netdev_priv_flags {
  *     @phydev:        Physical device may attach itself
  *                     for hardware timestamping
  *     @sfp_bus:       attached &struct sfp_bus structure.
- *
- *     @qdisc_tx_busylock: lockdep class annotating Qdisc->busylock spinlock
- *     @qdisc_running_key: lockdep class annotating Qdisc->running seqcount
+ *     @qdisc_tx_busylock_key: lockdep class annotating Qdisc->busylock
+ *                             spinlock
+ *     @qdisc_running_key:     lockdep class annotating Qdisc->running seqcount
+ *     @qdisc_xmit_lock_key:   lockdep class annotating
+ *                             netdev_queue->_xmit_lock spinlock
+ *     @addr_list_lock_key:    lockdep class annotating
+ *                             net_device->addr_list_lock spinlock
  *
  *     @proto_down:    protocol port state information can be sent to the
  *                     switch driver and used to set the phys state of the
@@ -1875,6 +1881,8 @@ struct net_device {
        unsigned char           perm_addr[MAX_ADDR_LEN];
        unsigned char           addr_assign_type;
        unsigned char           addr_len;
+       unsigned char           upper_level;
+       unsigned char           lower_level;
        unsigned short          neigh_priv_len;
        unsigned short          dev_id;
        unsigned short          dev_port;
@@ -2045,8 +2053,10 @@ struct net_device {
 #endif
        struct phy_device       *phydev;
        struct sfp_bus          *sfp_bus;
-       struct lock_class_key   *qdisc_tx_busylock;
-       struct lock_class_key   *qdisc_running_key;
+       struct lock_class_key   qdisc_tx_busylock_key;
+       struct lock_class_key   qdisc_running_key;
+       struct lock_class_key   qdisc_xmit_lock_key;
+       struct lock_class_key   addr_list_lock_key;
        bool                    proto_down;
        unsigned                wol_enabled:1;
 };
@@ -2124,23 +2134,6 @@ static inline void netdev_for_each_tx_queue(struct net_device *dev,
                f(dev, &dev->_tx[i], arg);
 }
 
-#define netdev_lockdep_set_classes(dev)                                \
-{                                                              \
-       static struct lock_class_key qdisc_tx_busylock_key;     \
-       static struct lock_class_key qdisc_running_key;         \
-       static struct lock_class_key qdisc_xmit_lock_key;       \
-       static struct lock_class_key dev_addr_list_lock_key;    \
-       unsigned int i;                                         \
-                                                               \
-       (dev)->qdisc_tx_busylock = &qdisc_tx_busylock_key;      \
-       (dev)->qdisc_running_key = &qdisc_running_key;          \
-       lockdep_set_class(&(dev)->addr_list_lock,               \
-                         &dev_addr_list_lock_key);             \
-       for (i = 0; i < (dev)->num_tx_queues; i++)              \
-               lockdep_set_class(&(dev)->_tx[i]._xmit_lock,    \
-                                 &qdisc_xmit_lock_key);        \
-}
-
 u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
                     struct net_device *sb_dev);
 struct netdev_queue *netdev_core_pick_tx(struct net_device *dev,
@@ -3139,6 +3132,7 @@ static inline void netif_stop_queue(struct net_device *dev)
 }
 
 void netif_tx_stop_all_queues(struct net_device *dev);
+void netdev_update_lockdep_key(struct net_device *dev);
 
 static inline bool netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
 {
@@ -4056,16 +4050,6 @@ static inline void netif_addr_lock(struct net_device *dev)
        spin_lock(&dev->addr_list_lock);
 }
 
-static inline void netif_addr_lock_nested(struct net_device *dev)
-{
-       int subclass = SINGLE_DEPTH_NESTING;
-
-       if (dev->netdev_ops->ndo_get_lock_subclass)
-               subclass = dev->netdev_ops->ndo_get_lock_subclass(dev);
-
-       spin_lock_nested(&dev->addr_list_lock, subclass);
-}
-
 static inline void netif_addr_lock_bh(struct net_device *dev)
 {
        spin_lock_bh(&dev->addr_list_lock);
@@ -4329,6 +4313,16 @@ int netdev_master_upper_dev_link(struct net_device *dev,
                                 struct netlink_ext_ack *extack);
 void netdev_upper_dev_unlink(struct net_device *dev,
                             struct net_device *upper_dev);
+int netdev_adjacent_change_prepare(struct net_device *old_dev,
+                                  struct net_device *new_dev,
+                                  struct net_device *dev,
+                                  struct netlink_ext_ack *extack);
+void netdev_adjacent_change_commit(struct net_device *old_dev,
+                                  struct net_device *new_dev,
+                                  struct net_device *dev);
+void netdev_adjacent_change_abort(struct net_device *old_dev,
+                                 struct net_device *new_dev,
+                                 struct net_device *dev);
 void netdev_adjacent_rename_links(struct net_device *dev, char *oldname);
 void *netdev_lower_dev_get_private(struct net_device *dev,
                                   struct net_device *lower_dev);
@@ -4340,7 +4334,6 @@ void netdev_lower_state_changed(struct net_device *lower_dev,
 extern u8 netdev_rss_key[NETDEV_RSS_KEY_LEN] __read_mostly;
 void netdev_rss_key_fill(void *buffer, size_t len);
 
-int dev_get_nest_level(struct net_device *dev);
 int skb_checksum_help(struct sk_buff *skb);
 int skb_crc32c_csum_help(struct sk_buff *skb);
 int skb_csum_hwoffload_help(struct sk_buff *skb,
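
With ndo_get_lock_subclass() and the nest-level bookkeeping gone, stacked drivers are expected to use the new prepare/commit/abort trio when swapping an adjacent device, and netdev_update_lockdep_key() when a device's locking class changes. A hedged sketch of the intended calling pattern; example_replumb() is a hypothetical driver-specific step, not a kernel API:

#include <linux/netdevice.h>
#include <linux/netlink.h>

/* Hypothetical driver-specific work done between prepare and commit. */
static int example_replumb(struct net_device *dev, struct net_device *new_dev)
{
	return 0;
}

/* Illustrative: replace dev's lower device old_dev with new_dev. */
static int example_swap_lower(struct net_device *dev,
			      struct net_device *old_dev,
			      struct net_device *new_dev,
			      struct netlink_ext_ack *extack)
{
	int err;

	err = netdev_adjacent_change_prepare(old_dev, new_dev, dev, extack);
	if (err)
		return err;

	err = example_replumb(dev, new_dev);
	if (err) {
		netdev_adjacent_change_abort(old_dev, new_dev, dev);
		return err;
	}

	netdev_adjacent_change_commit(old_dev, new_dev, dev);
	return 0;
}
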
index f91cb8898ff0af60d751f7c026c7b9f852fe620b..1bf83c8fcaa77e31cd3acd8e2f16c54e6ea05bf5 100644 (file)
@@ -622,12 +622,28 @@ static inline int PageTransCompound(struct page *page)
  *
  * Unlike PageTransCompound, this is safe to be called only while
  * split_huge_pmd() cannot run from under us, like if protected by the
- * MMU notifier, otherwise it may result in page->_mapcount < 0 false
+ * MMU notifier, otherwise it may result in page->_mapcount check false
  * positives.
+ *
+ * We have to treat page cache THP differently since every subpage of it
+ * would get _mapcount inc'ed once it is PMD mapped.  But it may also be
+ * PTE mapped in the current process, so compare the subpage's _mapcount
+ * with compound_mapcount to filter out the PTE mapped case.
  */
 static inline int PageTransCompoundMap(struct page *page)
 {
-       return PageTransCompound(page) && atomic_read(&page->_mapcount) < 0;
+       struct page *head;
+
+       if (!PageTransCompound(page))
+               return 0;
+
+       if (PageAnon(page))
+               return atomic_read(&page->_mapcount) < 0;
+
+       head = compound_head(page);
+       /* File THP is PMD mapped and not PTE mapped */
+       return atomic_read(&page->_mapcount) ==
+              atomic_read(compound_mapcount_ptr(head));
 }
 
 /*
index 682fd465df060c0ac2cde1f67b7b6c11090dd17f..cfce186f0c4e0ecd8be7c211e8a3fbb95c963242 100644 (file)
@@ -18,7 +18,7 @@ struct page_ext_operations {
 
 enum page_ext_flags {
        PAGE_EXT_OWNER,
-       PAGE_EXT_OWNER_ACTIVE,
+       PAGE_EXT_OWNER_ALLOCATED,
 #if defined(CONFIG_IDLE_PAGE_TRACKING) && !defined(CONFIG_64BIT)
        PAGE_EXT_YOUNG,
        PAGE_EXT_IDLE,
@@ -36,6 +36,7 @@ struct page_ext {
        unsigned long flags;
 };
 
+extern unsigned long page_ext_size;
 extern void pgdat_page_ext_init(struct pglist_data *pgdat);
 
 #ifdef CONFIG_SPARSEMEM
@@ -52,6 +53,13 @@ static inline void page_ext_init(void)
 
 struct page_ext *lookup_page_ext(const struct page *page);
 
+static inline struct page_ext *page_ext_next(struct page_ext *curr)
+{
+       void *next = curr;
+       next += page_ext_size;
+       return next;
+}
+
 #else /* !CONFIG_PAGE_EXTENSION */
 struct page_ext;
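
Because page_ext records are page_ext_size bytes each (the size now exported above), walking the records of a compound allocation needs byte-wise stepping; page_ext_next() encapsulates that. A hedged sketch of iterating an order-N block, assuming the caller already looked up the head page's page_ext:

#include <linux/bitops.h>
#include <linux/page_ext.h>

/* Illustrative: tag every page_ext record of an order-N block as allocated. */
static void example_mark_allocated(struct page_ext *page_ext,
				   unsigned int order)
{
	unsigned int i;

	for (i = 0; i < (1U << order); i++) {
		__set_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags);
		page_ext = page_ext_next(page_ext);
	}
}
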
 
index 61448c19a132c29c92238e782330401ddf0fba17..68ccc5b1913b485ba940dc6fa283a6ec7c0a381d 100644 (file)
@@ -292,7 +292,7 @@ struct pmu {
         *  -EBUSY      -- @event is for this PMU but PMU temporarily unavailable
         *  -EINVAL     -- @event is for this PMU but @event is not valid
         *  -EOPNOTSUPP -- @event is for this PMU, @event is valid, but not supported
-        *  -EACCESS    -- @event is for this PMU, @event is valid, but no privilidges
+        *  -EACCES     -- @event is for this PMU, @event is valid, but no privileges
         *
         *  0           -- @event is for this PMU and valid
         *
index a7ecbe0e55aa51180c3707ea77809aa52426d8f4..9a0e981df5024778f348f51207585cd944ceb82a 100644 (file)
@@ -678,6 +678,7 @@ static inline bool phy_is_started(struct phy_device *phydev)
        return phydev->state >= PHY_UP;
 }
 
+void phy_resolve_aneg_pause(struct phy_device *phydev);
 void phy_resolve_aneg_linkmode(struct phy_device *phydev);
 
 /**
@@ -1076,6 +1077,7 @@ int genphy_config_eee_advert(struct phy_device *phydev);
 int __genphy_config_aneg(struct phy_device *phydev, bool changed);
 int genphy_aneg_done(struct phy_device *phydev);
 int genphy_update_link(struct phy_device *phydev);
+int genphy_read_lpa(struct phy_device *phydev);
 int genphy_read_status(struct phy_device *phydev);
 int genphy_suspend(struct phy_device *phydev);
 int genphy_resume(struct phy_device *phydev);
index 6eaa53cef0bd228b8a0d7bb2b3938ff04c548f95..30e676b36b247a595c39e98d7c59bdaa7e6eb233 100644 (file)
@@ -51,7 +51,10 @@ struct sdma_script_start_addrs {
        /* End of v2 array */
        s32 zcanfd_2_mcu_addr;
        s32 zqspi_2_mcu_addr;
+       s32 mcu_2_ecspi_addr;
        /* End of v3 array */
+       s32 mcu_2_zqspi_addr;
+       /* End of v4 array */
 };
 
 /**
index 1b5cec0675335d07cf5989fa9ffe38a1351226db..f2688404d1cd73d3423d3443304a975306f4e0a1 100644 (file)
@@ -64,6 +64,8 @@ extern struct resource *platform_get_resource_byname(struct platform_device *,
                                                     unsigned int,
                                                     const char *);
 extern int platform_get_irq_byname(struct platform_device *, const char *);
+extern int platform_get_irq_byname_optional(struct platform_device *dev,
+                                           const char *name);
 extern int platform_add_devices(struct platform_device **, int);
 
 struct platform_device_info {
index 222c3e01397c17674ea6811aaefc330c3a3077a1..ebf5ef17cc2ae3f45a1ff5379c387e2d0bc9a089 100644 (file)
@@ -34,8 +34,6 @@ enum pm_qos_flags_status {
 #define PM_QOS_RESUME_LATENCY_NO_CONSTRAINT    PM_QOS_LATENCY_ANY
 #define PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS PM_QOS_LATENCY_ANY_NS
 #define PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE 0
-#define PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE     0
-#define PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE     (-1)
 #define PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT (-1)
 
 #define PM_QOS_FLAG_NO_POWER_OFF       (1 << 0)
@@ -54,8 +52,6 @@ struct pm_qos_flags_request {
 enum dev_pm_qos_req_type {
        DEV_PM_QOS_RESUME_LATENCY = 1,
        DEV_PM_QOS_LATENCY_TOLERANCE,
-       DEV_PM_QOS_MIN_FREQUENCY,
-       DEV_PM_QOS_MAX_FREQUENCY,
        DEV_PM_QOS_FLAGS,
 };
 
@@ -97,14 +93,10 @@ struct pm_qos_flags {
 struct dev_pm_qos {
        struct pm_qos_constraints resume_latency;
        struct pm_qos_constraints latency_tolerance;
-       struct pm_qos_constraints min_frequency;
-       struct pm_qos_constraints max_frequency;
        struct pm_qos_flags flags;
        struct dev_pm_qos_request *resume_latency_req;
        struct dev_pm_qos_request *latency_tolerance_req;
        struct dev_pm_qos_request *flags_req;
-       struct dev_pm_qos_request *min_frequency_req;
-       struct dev_pm_qos_request *max_frequency_req;
 };
 
 /* Action requested to pm_qos_update_target */
@@ -199,10 +191,6 @@ static inline s32 dev_pm_qos_read_value(struct device *dev,
        switch (type) {
        case DEV_PM_QOS_RESUME_LATENCY:
                return PM_QOS_RESUME_LATENCY_NO_CONSTRAINT;
-       case DEV_PM_QOS_MIN_FREQUENCY:
-               return PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE;
-       case DEV_PM_QOS_MAX_FREQUENCY:
-               return PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE;
        default:
                WARN_ON(1);
                return 0;
@@ -267,4 +255,48 @@ static inline s32 dev_pm_qos_raw_resume_latency(struct device *dev)
 }
 #endif
 
+#define FREQ_QOS_MIN_DEFAULT_VALUE     0
+#define FREQ_QOS_MAX_DEFAULT_VALUE     (-1)
+
+enum freq_qos_req_type {
+       FREQ_QOS_MIN = 1,
+       FREQ_QOS_MAX,
+};
+
+struct freq_constraints {
+       struct pm_qos_constraints min_freq;
+       struct blocking_notifier_head min_freq_notifiers;
+       struct pm_qos_constraints max_freq;
+       struct blocking_notifier_head max_freq_notifiers;
+};
+
+struct freq_qos_request {
+       enum freq_qos_req_type type;
+       struct plist_node pnode;
+       struct freq_constraints *qos;
+};
+
+static inline int freq_qos_request_active(struct freq_qos_request *req)
+{
+       return !IS_ERR_OR_NULL(req->qos);
+}
+
+void freq_constraints_init(struct freq_constraints *qos);
+
+s32 freq_qos_read_value(struct freq_constraints *qos,
+                       enum freq_qos_req_type type);
+
+int freq_qos_add_request(struct freq_constraints *qos,
+                        struct freq_qos_request *req,
+                        enum freq_qos_req_type type, s32 value);
+int freq_qos_update_request(struct freq_qos_request *req, s32 new_value);
+int freq_qos_remove_request(struct freq_qos_request *req);
+
+int freq_qos_add_notifier(struct freq_constraints *qos,
+                         enum freq_qos_req_type type,
+                         struct notifier_block *notifier);
+int freq_qos_remove_notifier(struct freq_constraints *qos,
+                            enum freq_qos_req_type type,
+                            struct notifier_block *notifier);
+
 #endif
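
The device PM QoS frequency limits are replaced by this standalone freq_qos interface built on pm_qos_constraints. A hedged sketch of a consumer placing, updating and later dropping a minimum-frequency request; the constraint set, request storage and the kHz values are illustrative assumptions:

#include <linux/pm_qos.h>

/* Illustrative: ask for a frequency floor on an existing constraint set. */
static int example_set_freq_floor(struct freq_constraints *qos,
				  struct freq_qos_request *req)
{
	return freq_qos_add_request(qos, req, FREQ_QOS_MIN, 800000);
}

static int example_raise_freq_floor(struct freq_qos_request *req)
{
	return freq_qos_update_request(req, 1200000);
}

static void example_drop_freq_floor(struct freq_qos_request *req)
{
	if (freq_qos_request_active(req))
		freq_qos_remove_request(req);
}
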
index 2c2e56bd8913250e88982ee8e47e36b23081fe23..67a1d86981a9d342a0ca1f0fefd7532526dfe467 100644 (file)
@@ -223,6 +223,7 @@ extern long schedule_timeout_uninterruptible(long timeout);
 extern long schedule_timeout_idle(long timeout);
 asmlinkage void schedule(void);
 extern void schedule_preempt_disabled(void);
+asmlinkage void preempt_schedule_irq(void);
 
 extern int __must_check io_schedule_prepare(void);
 extern void io_schedule_finish(int token);
index a8d59d612d274bdb5df8cbd13245b35fc2e08908..9df7547afc0cb783883b5c5c7b32b2956a650d7a 100644 (file)
@@ -105,6 +105,7 @@ enum lockdown_reason {
        LOCKDOWN_NONE,
        LOCKDOWN_MODULE_SIGNATURE,
        LOCKDOWN_DEV_MEM,
+       LOCKDOWN_EFI_TEST,
        LOCKDOWN_KEXEC,
        LOCKDOWN_HIBERNATION,
        LOCKDOWN_PCI_ACCESS,
index e7d3b1a513ef028fdbb0df2ce4014c58c28787c8..64a395c7f6895ce4dc439571de2eec8673b393e4 100644 (file)
@@ -1354,7 +1354,8 @@ static inline __u32 skb_get_hash_flowi6(struct sk_buff *skb, const struct flowi6
        return skb->hash;
 }
 
-__u32 skb_get_hash_perturb(const struct sk_buff *skb, u32 perturb);
+__u32 skb_get_hash_perturb(const struct sk_buff *skb,
+                          const siphash_key_t *perturb);
 
 static inline __u32 skb_get_hash_raw(const struct sk_buff *skb)
 {
@@ -1494,6 +1495,19 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
        return list->next == (const struct sk_buff *) list;
 }
 
+/**
+ *     skb_queue_empty_lockless - check if a queue is empty
+ *     @list: queue head
+ *
+ *     Returns true if the queue is empty, false otherwise.
+ *     This variant can be used in lockless contexts.
+ */
+static inline bool skb_queue_empty_lockless(const struct sk_buff_head *list)
+{
+       return READ_ONCE(list->next) == (const struct sk_buff *) list;
+}
+
+
 /**
  *     skb_queue_is_last - check if skb is the last entry in the queue
  *     @list: queue head
@@ -1847,9 +1861,11 @@ static inline void __skb_insert(struct sk_buff *newsk,
                                struct sk_buff *prev, struct sk_buff *next,
                                struct sk_buff_head *list)
 {
-       newsk->next = next;
-       newsk->prev = prev;
-       next->prev  = prev->next = newsk;
+       /* see skb_queue_empty_lockless() for the opposite READ_ONCE() */
+       WRITE_ONCE(newsk->next, next);
+       WRITE_ONCE(newsk->prev, prev);
+       WRITE_ONCE(next->prev, newsk);
+       WRITE_ONCE(prev->next, newsk);
        list->qlen++;
 }
 
@@ -1860,11 +1876,11 @@ static inline void __skb_queue_splice(const struct sk_buff_head *list,
        struct sk_buff *first = list->next;
        struct sk_buff *last = list->prev;
 
-       first->prev = prev;
-       prev->next = first;
+       WRITE_ONCE(first->prev, prev);
+       WRITE_ONCE(prev->next, first);
 
-       last->next = next;
-       next->prev = last;
+       WRITE_ONCE(last->next, next);
+       WRITE_ONCE(next->prev, last);
 }
 
 /**
@@ -2005,8 +2021,8 @@ static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
        next       = skb->next;
        prev       = skb->prev;
        skb->next  = skb->prev = NULL;
-       next->prev = prev;
-       prev->next = next;
+       WRITE_ONCE(next->prev, prev);
+       WRITE_ONCE(prev->next, next);
 }
 
 /**
@@ -3510,8 +3526,9 @@ int skb_ensure_writable(struct sk_buff *skb, int write_len);
 int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci);
 int skb_vlan_pop(struct sk_buff *skb);
 int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci);
-int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto);
-int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto);
+int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto,
+                 int mac_len);
+int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto, int mac_len);
 int skb_mpls_update_lse(struct sk_buff *skb, __be32 mpls_lse);
 int skb_mpls_dec_ttl(struct sk_buff *skb);
 struct sk_buff *pskb_extract(struct sk_buff *skb, int off, int to_copy,
@@ -4160,15 +4177,12 @@ static inline void __skb_ext_copy(struct sk_buff *d, const struct sk_buff *s) {}
 static inline void skb_ext_copy(struct sk_buff *dst, const struct sk_buff *s) {}
 #endif /* CONFIG_SKB_EXTENSIONS */
 
-static inline void nf_reset(struct sk_buff *skb)
+static inline void nf_reset_ct(struct sk_buff *skb)
 {
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
        nf_conntrack_put(skb_nfct(skb));
        skb->_nfct = 0;
 #endif
-#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
-       skb_ext_del(skb, SKB_EXT_BRIDGE_NF);
-#endif
 }
 
 static inline void nf_reset_trace(struct sk_buff *skb)
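
skb_queue_empty_lockless() and the WRITE_ONCE() stores now done in __skb_insert()/__skb_unlink() form a pair: writers publish the list pointers with single stores, so a reader that holds no queue lock still sees a sane value. A hedged sketch of the intended pattern, peeking without the lock and re-checking under it:

#include <linux/skbuff.h>
#include <linux/spinlock.h>

/* Illustrative: avoid the queue lock entirely when the queue looks empty. */
static struct sk_buff *example_try_dequeue(struct sk_buff_head *queue)
{
	struct sk_buff *skb = NULL;

	if (skb_queue_empty_lockless(queue))
		return NULL;			/* lockless fast path */

	spin_lock_bh(&queue->lock);
	if (!skb_queue_empty(queue))		/* re-check under the lock */
		skb = __skb_dequeue(queue);
	spin_unlock_bh(&queue->lock);

	return skb;
}
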
index ab2b98ad76e102f14eeaeb412c9769dd714b02d5..4d2a2fa55ed55ad7930f275db30b754752683d07 100644 (file)
@@ -493,6 +493,10 @@ static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
  * kmalloc is the normal method of allocating memory
  * for objects smaller than page size in the kernel.
  *
+ * The allocated object address is aligned to at least ARCH_KMALLOC_MINALIGN
+ * bytes. For @size that is a power of two, the alignment is also guaranteed
+ * to be at least @size.
+ *
  * The @flags argument may be one of the GFP flags defined at
  * include/linux/gfp.h and described at
  * :ref:`Documentation/core-api/mm-api.rst <mm-api-gfp-flags>`
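
In practice the documented guarantee means a power-of-two kmalloc() can be handed straight to hardware or code that needs natural alignment, without manual rounding. A small hedged illustration:

#include <linux/bug.h>
#include <linux/slab.h>

/* Illustrative: 512 is a power of two, so the returned buffer
 * is at least 512-byte aligned.
 */
static void *example_alloc_sector(void)
{
	void *buf = kmalloc(512, GFP_KERNEL);

	WARN_ON_ONCE(buf && ((unsigned long)buf & 511));
	return buf;
}
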
index fc0bed59fc84ef8e6631d3c275853d52a46f84aa..4049d9755cf198bdda600a61485b36a888b9d879 100644 (file)
@@ -263,7 +263,7 @@ struct ucred {
 #define PF_MAX         AF_MAX
 
 /* Maximum queue length specifiable by listen.  */
-#define SOMAXCONN      128
+#define SOMAXCONN      4096
 
 /* Flags we can use with send/ and recv.
    Added those for 1003.1g not all are supported yet
index b2f9df7f07619edff7e9bbdaa23a40782752ad99..b6ccdc2c7f02639cd586244a79309fa2ddb1f635 100644 (file)
@@ -227,7 +227,26 @@ static inline bool strstarts(const char *str, const char *prefix)
 }
 
 size_t memweight(const void *ptr, size_t bytes);
-void memzero_explicit(void *s, size_t count);
+
+/**
+ * memzero_explicit - Fill a region of memory (e.g. sensitive
+ *                   keying data) with 0s.
+ * @s: Pointer to the start of the area.
+ * @count: The size of the area.
+ *
+ * Note: usually using memset() is just fine (!), but in cases
+ * where clearing out _local_ data at the end of a scope is
+ * necessary, memzero_explicit() should be used instead in
+ * order to prevent the compiler from optimising away zeroing.
+ *
+ * memzero_explicit() doesn't need an arch-specific version as
+ * it simply wraps memset() followed by barrier_data().
+ */
+static inline void memzero_explicit(void *s, size_t count)
+{
+       memset(s, 0, count);
+       barrier_data(s);
+}
 
 /**
  * kbasename - return the last part of a pathname.
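
With memzero_explicit() now an inline wrapper around memset() plus barrier_data(), callers get a clear that the compiler cannot drop as a dead store. A hedged sketch of the typical pattern; example_derive_key() is a hypothetical stand-in, not a real kernel API:

#include <linux/string.h>
#include <linux/types.h>

/* Hypothetical key-derivation step, used only for illustration. */
static int example_derive_key(u8 *key, size_t key_len,
			      const u8 *secret, size_t secret_len);

static int example_use_secret(const u8 *secret, size_t secret_len)
{
	u8 key[32];
	int ret;

	ret = example_derive_key(key, sizeof(key), secret, secret_len);
	/* key[] is dead from here on; make sure it is really wiped. */
	memzero_explicit(key, sizeof(key));
	return ret;
}
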
index 87d27e13d8859742044849854288a7f5bec76dfe..d796058cdff2a82b0c65450f988f29683513cea8 100644 (file)
@@ -64,6 +64,11 @@ static inline int xprt_setup_backchannel(struct rpc_xprt *xprt,
        return 0;
 }
 
+static inline void xprt_destroy_backchannel(struct rpc_xprt *xprt,
+                                           unsigned int max_reqs)
+{
+}
+
 static inline bool svc_is_backchannel(const struct svc_rqst *rqstp)
 {
        return false;
index 7638dbe7bc50026eebcd8cf8f519ea49b4999a41..a940de03808dd328a6743a405b6cf8df3257878f 100644 (file)
@@ -61,6 +61,7 @@ struct sock_xprt {
        struct mutex            recv_mutex;
        struct sockaddr_storage srcaddr;
        unsigned short          srcport;
+       int                     xprt_err;
 
        /*
         * UDP socket buffer size parameters
index 5420817ed317ebf3642758dd950d3994458eb977..fa7ee503fb76326d181d844aad22700bd85bf7fa 100644 (file)
@@ -196,9 +196,9 @@ struct bin_attribute {
        .size   = _size,                                                \
 }
 
-#define __BIN_ATTR_WO(_name) {                                         \
+#define __BIN_ATTR_WO(_name, _size) {                                  \
        .attr   = { .name = __stringify(_name), .mode = 0200 },         \
-       .store  = _name##_store,                                        \
+       .write  = _name##_write,                                        \
        .size   = _size,                                                \
 }
 
index 99617e528ea2906b234cadc1fb3d79d0efd46331..668e25a76d69809b5ade30f5774a4c62833b8b9b 100644 (file)
@@ -393,7 +393,7 @@ struct tcp_sock {
        /* fastopen_rsk points to request_sock that resulted in this big
         * socket. Used to retransmit SYNACKs etc.
         */
-       struct request_sock *fastopen_rsk;
+       struct request_sock __rcu *fastopen_rsk;
        u32     *saved_syn;
 };
 
@@ -447,8 +447,8 @@ static inline struct tcp_timewait_sock *tcp_twsk(const struct sock *sk)
 
 static inline bool tcp_passive_fastopen(const struct sock *sk)
 {
-       return (sk->sk_state == TCP_SYN_RECV &&
-               tcp_sk(sk)->fastopen_rsk != NULL);
+       return sk->sk_state == TCP_SYN_RECV &&
+              rcu_access_pointer(tcp_sk(sk)->fastopen_rsk) != NULL;
 }
 
 static inline void fastopen_queue_tune(struct sock *sk, int backlog)
index 63238c84dc0b4eee0cda785a698d808be5dbca75..131ea1bad458bee37bb418d05c1504f90c2abc16 100644 (file)
@@ -152,7 +152,7 @@ struct tcg_algorithm_info {
  * total. Once we've done this we know the offset of the data length field,
  * and can calculate the total size of the event.
  *
- * Return: size of the event on success, <0 on failure
+ * Return: size of the event on success, 0 on failure
  */
 
 static inline int __calc_tpm2_event_size(struct tcg_pcr_event2_head *event,
@@ -170,6 +170,7 @@ static inline int __calc_tpm2_event_size(struct tcg_pcr_event2_head *event,
        u16 halg;
        int i;
        int j;
+       u32 count, event_type;
 
        marker = event;
        marker_start = marker;
@@ -190,16 +191,22 @@ static inline int __calc_tpm2_event_size(struct tcg_pcr_event2_head *event,
        }
 
        event = (struct tcg_pcr_event2_head *)mapping;
+       /*
+        * The loop below will unmap these fields if the log is larger than
+        * one page, so save them here for reference:
+        */
+       count = READ_ONCE(event->count);
+       event_type = READ_ONCE(event->event_type);
 
        efispecid = (struct tcg_efi_specid_event_head *)event_header->event;
 
        /* Check if event is malformed. */
-       if (event->count > efispecid->num_algs) {
+       if (count > efispecid->num_algs) {
                size = 0;
                goto out;
        }
 
-       for (i = 0; i < event->count; i++) {
+       for (i = 0; i < count; i++) {
                halg_size = sizeof(event->digests[i].alg_id);
 
                /* Map the digest's algorithm identifier */
@@ -256,8 +263,9 @@ static inline int __calc_tpm2_event_size(struct tcg_pcr_event2_head *event,
                + event_field->event_size;
        size = marker - marker_start;
 
-       if ((event->event_type == 0) && (event_field->event_size == 0))
+       if (event_type == 0 && event_field->event_size == 0)
                size = 0;
+
 out:
        if (do_mapping)
                TPM_MEMUNMAP(mapping, mapping_size);
index 70bbdc38dc37d3cafe82df3b140df557d3b6d7a9..d4ee6e9425625391d93eabe4a6d72cd0d4155315 100644 (file)
@@ -231,6 +231,76 @@ __copy_from_user_inatomic_nocache(void *to, const void __user *from,
 
 #endif         /* ARCH_HAS_NOCACHE_UACCESS */
 
+extern __must_check int check_zeroed_user(const void __user *from, size_t size);
+
+/**
+ * copy_struct_from_user: copy a struct from userspace
+ * @dst:   Destination address, in kernel space. This buffer must be @ksize
+ *         bytes long.
+ * @ksize: Size of @dst struct.
+ * @src:   Source address, in userspace.
+ * @usize: (Alleged) size of @src struct.
+ *
+ * Copies a struct from userspace to kernel space, in a way that guarantees
+ * backwards-compatibility for struct syscall arguments (as long as future
+ * struct extensions are made such that all new fields are *appended* to the
+ * old struct, and zeroed-out new fields have the same meaning as the old
+ * struct).
+ *
+ * @ksize is just sizeof(*dst), and @usize should've been passed by userspace.
+ * The recommended usage is something like the following:
+ *
+ *   SYSCALL_DEFINE2(foobar, const struct foo __user *, uarg, size_t, usize)
+ *   {
+ *      int err;
+ *      struct foo karg = {};
+ *
+ *      if (usize > PAGE_SIZE)
+ *        return -E2BIG;
+ *      if (usize < FOO_SIZE_VER0)
+ *        return -EINVAL;
+ *
+ *      err = copy_struct_from_user(&karg, sizeof(karg), uarg, usize);
+ *      if (err)
+ *        return err;
+ *
+ *      // ...
+ *   }
+ *
+ * There are three cases to consider:
+ *  * If @usize == @ksize, then it's copied verbatim.
+ *  * If @usize < @ksize, then the userspace has passed an old struct to a
+ *    newer kernel. The rest of the trailing bytes in @dst (@ksize - @usize)
+ *    are to be zero-filled.
+ *  * If @usize > @ksize, then the userspace has passed a new struct to an
+ *    older kernel. The trailing bytes unknown to the kernel (@usize - @ksize)
+ *    are checked to ensure they are zeroed, otherwise -E2BIG is returned.
+ *
+ * Returns (in all cases, some data may have been copied):
+ *  * -E2BIG:  (@usize > @ksize) and there are non-zero trailing bytes in @src.
+ *  * -EFAULT: access to userspace failed.
+ */
+static __always_inline __must_check int
+copy_struct_from_user(void *dst, size_t ksize, const void __user *src,
+                     size_t usize)
+{
+       size_t size = min(ksize, usize);
+       size_t rest = max(ksize, usize) - size;
+
+       /* Deal with trailing bytes. */
+       if (usize < ksize) {
+               memset(dst + size, 0, rest);
+       } else if (usize > ksize) {
+               int ret = check_zeroed_user(src + size, rest);
+               if (ret <= 0)
+                       return ret ?: -E2BIG;
+       }
+       /* Copy the interoperable parts of the struct. */
+       if (copy_from_user(dst, src, size))
+               return -EFAULT;
+       return 0;
+}
+
 /*
  * probe_kernel_read(): safely attempt to read from a location
  * @dst: pointer to the buffer that shall take the data
@@ -285,8 +355,10 @@ extern long strnlen_unsafe_user(const void __user *unsafe_addr, long count);
 #ifndef user_access_begin
 #define user_access_begin(ptr,len) access_ok(ptr, len)
 #define user_access_end() do { } while (0)
-#define unsafe_get_user(x, ptr, err) do { if (unlikely(__get_user(x, ptr))) goto err; } while (0)
-#define unsafe_put_user(x, ptr, err) do { if (unlikely(__put_user(x, ptr))) goto err; } while (0)
+#define unsafe_op_wrap(op, err) do { if (unlikely(op)) goto err; } while (0)
+#define unsafe_get_user(x,p,e) unsafe_op_wrap(__get_user(x,p),e)
+#define unsafe_put_user(x,p,e) unsafe_op_wrap(__put_user(x,p),e)
+#define unsafe_copy_to_user(d,s,l,e) unsafe_op_wrap(__copy_to_user(d,s,l),e)
 static inline unsigned long user_access_save(void) { return 0UL; }
 static inline void user_access_restore(unsigned long flags) { }
 #endif
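
A hedged, end-to-end sketch of the extensible-struct pattern that the copy_struct_from_user() comment describes; the struct, size macro and syscall name below are illustrative assumptions, not taken from the tree:

#include <linux/mm.h>
#include <linux/syscalls.h>
#include <linux/types.h>
#include <linux/uaccess.h>

/* Illustrative userspace ABI: new fields may only ever be appended. */
struct example_args {
	__u64 flags;
	__u64 addr;
	__u64 extra;		/* appended later; zero means "unused" */
};
#define EXAMPLE_ARGS_SIZE_VER0	16	/* sizeof the first published version */

SYSCALL_DEFINE2(example, const struct example_args __user *, uargs,
		size_t, usize)
{
	struct example_args args = {};
	int err;

	if (usize < EXAMPLE_ARGS_SIZE_VER0)
		return -EINVAL;
	if (usize > PAGE_SIZE)
		return -E2BIG;

	err = copy_struct_from_user(&args, sizeof(args), uargs, usize);
	if (err)
		return err;

	/* Old callers get zeroed tail fields; copy_struct_from_user() already
	 * rejected newer callers whose unknown trailing bytes were non-zero.
	 */
	return 0;
}
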
index 4c7781f4b29b6491204cb14cd7ce829a0f867588..07875ccc7bb50e9ec44a1886e4649aa27be1a2f8 100644 (file)
@@ -48,7 +48,6 @@ struct virtio_vsock_sock {
 
 struct virtio_vsock_pkt {
        struct virtio_vsock_hdr hdr;
-       struct work_struct work;
        struct list_head list;
        /* socket refcnt not held, only use for cancellation */
        struct vsock_sock *vsk;
index 5921599b6dc4f5f573ebf28a3a5553e0e391eda0..86eecbd98e84ab78c13fbdda228b173f9bb404f5 100644 (file)
@@ -230,8 +230,8 @@ static inline int xa_err(void *entry)
  * This structure is used either directly or via the XA_LIMIT() macro
  * to communicate the range of IDs that are valid for allocation.
  * Two common ranges are predefined for you:
- *  * xa_limit_32b     - [0 - UINT_MAX]
- *  * xa_limit_31b     - [0 - INT_MAX]
+ * * xa_limit_32b      - [0 - UINT_MAX]
+ * * xa_limit_31b      - [0 - INT_MAX]
  */
 struct xa_limit {
        u32 max;
index f7fe45689142150231a74247f35811ce4dd77600..1afc125014dac426c00af8807cc9f908345cbbb1 100644 (file)
@@ -203,7 +203,6 @@ struct bonding {
        struct   slave __rcu *primary_slave;
        struct   bond_up_slave __rcu *slave_arr; /* Array of usable slaves */
        bool     force_primary;
-       u32      nest_level;
        s32      slave_cnt; /* never change this value outside the attach/detach wrappers */
        int     (*recv_probe)(const struct sk_buff *, struct bonding *,
                              struct slave *);
@@ -239,6 +238,7 @@ struct bonding {
        struct   dentry *debug_dir;
 #endif /* CONFIG_DEBUG_FS */
        struct rtnl_link_stats64 bond_stats;
+       struct lock_class_key stats_lock_key;
 };
 
 #define bond_slave_get_rcu(dev) \
index 127a5c4e369914814c3004d2701ebb5ae026ca95..86e028388badc46977bf6c05bd8f718d852c14a8 100644 (file)
@@ -122,7 +122,7 @@ static inline void skb_mark_napi_id(struct sk_buff *skb,
 static inline void sk_mark_napi_id(struct sock *sk, const struct sk_buff *skb)
 {
 #ifdef CONFIG_NET_RX_BUSY_POLL
-       sk->sk_napi_id = skb->napi_id;
+       WRITE_ONCE(sk->sk_napi_id, skb->napi_id);
 #endif
        sk_rx_queue_set(sk, skb);
 }
@@ -132,8 +132,8 @@ static inline void sk_mark_napi_id_once(struct sock *sk,
                                        const struct sk_buff *skb)
 {
 #ifdef CONFIG_NET_RX_BUSY_POLL
-       if (!sk->sk_napi_id)
-               sk->sk_napi_id = skb->napi_id;
+       if (!READ_ONCE(sk->sk_napi_id))
+               WRITE_ONCE(sk->sk_napi_id, skb->napi_id);
 #endif
 }
 
index ff45c3e1abff820cbfa93715e681592cd32576af..4ab2c49423dcba4b42706cf523cefe4fee672da3 100644 (file)
@@ -5549,6 +5549,14 @@ const struct ieee80211_reg_rule *freq_reg_info(struct wiphy *wiphy,
  */
 const char *reg_initiator_name(enum nl80211_reg_initiator initiator);
 
+/**
+ * regulatory_pre_cac_allowed - check if pre-CAC allowed in the current regdom
+ * @wiphy: wiphy for which pre-CAC capability is checked.
+ *
+ * Pre-CAC is allowed only in some regdomains (notably ETSI).
+ */
+bool regulatory_pre_cac_allowed(struct wiphy *wiphy);
+
 /**
  * DOC: Internal regulatory db functions
  *
index 90bd210be06075818a8ba3213006e16eb610c602..5cd12276ae21a52006debcd846c85be1b04bf596 100644 (file)
@@ -4,6 +4,7 @@
 
 #include <linux/types.h>
 #include <linux/in6.h>
+#include <linux/siphash.h>
 #include <uapi/linux/if_ether.h>
 
 /**
@@ -276,7 +277,7 @@ struct flow_keys_basic {
 struct flow_keys {
        struct flow_dissector_key_control control;
 #define FLOW_KEYS_HASH_START_FIELD basic
-       struct flow_dissector_key_basic basic;
+       struct flow_dissector_key_basic basic __aligned(SIPHASH_ALIGNMENT);
        struct flow_dissector_key_tags tags;
        struct flow_dissector_key_vlan vlan;
        struct flow_dissector_key_vlan cvlan;
index d126b5d2026155aa7311ea82ce609122c6da45e0..2ad85e68304142fc3193e8a2c4dc1cfb4cca821a 100644 (file)
@@ -69,7 +69,7 @@ struct fq {
        struct list_head backlogs;
        spinlock_t lock;
        u32 flows_cnt;
-       u32 perturbation;
+       siphash_key_t   perturbation;
        u32 limit;
        u32 memory_limit;
        u32 memory_usage;
index be40a4b327e3d1d51f15f29b45216b39e7d1f42e..107c0d700ed6ffd03cd1dc835f086eb989adbed3 100644 (file)
@@ -108,7 +108,7 @@ begin:
 
 static u32 fq_flow_idx(struct fq *fq, struct sk_buff *skb)
 {
-       u32 hash = skb_get_hash_perturb(skb, fq->perturbation);
+       u32 hash = skb_get_hash_perturb(skb, &fq->perturbation);
 
        return reciprocal_scale(hash, fq->flows_cnt);
 }
@@ -308,7 +308,7 @@ static int fq_init(struct fq *fq, int flows_cnt)
        INIT_LIST_HEAD(&fq->backlogs);
        spin_lock_init(&fq->lock);
        fq->flows_cnt = max_t(u32, flows_cnt, 1);
-       fq->perturbation = prandom_u32();
+       get_random_bytes(&fq->perturbation, sizeof(fq->perturbation));
        fq->quantum = 300;
        fq->limit = 8192;
        fq->memory_limit = 16 << 20; /* 16 MBytes */
index 81643cf8a1c43ea5d7a61aba0722fd2c37e55f2c..c81444611a228e51413fd162a67b2bec44400012 100644 (file)
@@ -21,9 +21,13 @@ void hwbm_buf_free(struct hwbm_pool *bm_pool, void *buf);
 int hwbm_pool_refill(struct hwbm_pool *bm_pool, gfp_t gfp);
 int hwbm_pool_add(struct hwbm_pool *bm_pool, unsigned int buf_num);
 #else
-void hwbm_buf_free(struct hwbm_pool *bm_pool, void *buf) {}
-int hwbm_pool_refill(struct hwbm_pool *bm_pool, gfp_t gfp) { return 0; }
-int hwbm_pool_add(struct hwbm_pool *bm_pool, unsigned int buf_num)
+static inline void hwbm_buf_free(struct hwbm_pool *bm_pool, void *buf) {}
+
+static inline int hwbm_pool_refill(struct hwbm_pool *bm_pool, gfp_t gfp)
+{ return 0; }
+
+static inline int hwbm_pool_add(struct hwbm_pool *bm_pool,
+                               unsigned int buf_num)
 { return 0; }
 #endif /* CONFIG_HWBM */
 #endif /* _HWBM_H */
index 95bb77f95bcc37404c143f7176117c72c2023d27..a2c61c36dc4afc2c2e334eeb2174f9e5636a106e 100644 (file)
@@ -185,7 +185,7 @@ static inline struct sk_buff *ip_fraglist_next(struct ip_fraglist_iter *iter)
 }
 
 struct ip_frag_state {
-       struct iphdr    *iph;
+       bool            DF;
        unsigned int    hlen;
        unsigned int    ll_rs;
        unsigned int    mtu;
@@ -196,7 +196,7 @@ struct ip_frag_state {
 };
 
 void ip_frag_init(struct sk_buff *skb, unsigned int hlen, unsigned int ll_rs,
-                 unsigned int mtu, struct ip_frag_state *state);
+                 unsigned int mtu, bool DF, struct ip_frag_state *state);
 struct sk_buff *ip_frag_next(struct sk_buff *skb,
                             struct ip_frag_state *state);
 
index 3759167f91f5643ea5a192eb7e82ce3a34e9e91b..078887c8c586ad3fba436261462118ad9c47cc81 100644 (file)
@@ -889,6 +889,7 @@ struct netns_ipvs {
        struct delayed_work     defense_work;   /* Work handler */
        int                     drop_rate;
        int                     drop_counter;
+       int                     old_secure_tcp;
        atomic_t                dropentry;
        /* locks in ctl.c */
        spinlock_t              dropentry_lock;  /* drop entry handling */
index df528a6235487d3678ffdccb28facf977a1e7098..ea985aa7a6c5e64b6802c2399ad47db23fdbd9bf 100644 (file)
@@ -104,7 +104,7 @@ void llc_sk_reset(struct sock *sk);
 
 /* Access to a connection */
 int llc_conn_state_process(struct sock *sk, struct sk_buff *skb);
-int llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb);
+void llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb);
 void llc_conn_rtn_pdu(struct sock *sk, struct sk_buff *skb);
 void llc_conn_resend_i_pdu_as_cmd(struct sock *sk, u8 nr, u8 first_p_bit);
 void llc_conn_resend_i_pdu_as_rsp(struct sock *sk, u8 nr, u8 first_f_bit);
index f8712bbeb2e039657e5cf8d37b15511de8c9c694..c7e15a213ef2fe547b8dfc4c8e1638bd2b5b499b 100644 (file)
@@ -52,6 +52,9 @@ struct bpf_prog;
 #define NETDEV_HASHENTRIES (1 << NETDEV_HASHBITS)
 
 struct net {
+       /* First cache line is often dirtied.
+        * Do not place read-mostly fields here.
+        */
        refcount_t              passive;        /* To decide when the network
                                                 * namespace should be freed.
                                                 */
@@ -60,7 +63,13 @@ struct net {
                                                 */
        spinlock_t              rules_mod_lock;
 
-       u32                     hash_mix;
+       unsigned int            dev_unreg_count;
+
+       unsigned int            dev_base_seq;   /* protected by rtnl_mutex */
+       int                     ifindex;
+
+       spinlock_t              nsid_lock;
+       atomic_t                fnhe_genid;
 
        struct list_head        list;           /* list of network namespaces */
        struct list_head        exit_list;      /* To linked to call pernet exit
@@ -76,11 +85,11 @@ struct net {
 #endif
        struct user_namespace   *user_ns;       /* Owning user namespace */
        struct ucounts          *ucounts;
-       spinlock_t              nsid_lock;
        struct idr              netns_ids;
 
        struct ns_common        ns;
 
+       struct list_head        dev_base_head;
        struct proc_dir_entry   *proc_net;
        struct proc_dir_entry   *proc_net_stat;
 
@@ -93,17 +102,18 @@ struct net {
 
        struct uevent_sock      *uevent_sock;           /* uevent socket */
 
-       struct list_head        dev_base_head;
        struct hlist_head       *dev_name_head;
        struct hlist_head       *dev_index_head;
-       unsigned int            dev_base_seq;   /* protected by rtnl_mutex */
-       int                     ifindex;
-       unsigned int            dev_unreg_count;
+       /* Note that @hash_mix can be read millions of times per second;
+        * it is critical that it stays on a read-mostly cache line.
+        */
+       u32                     hash_mix;
+
+       struct net_device       *loopback_dev;          /* The loopback */
 
        /* core fib_rules */
        struct list_head        rules_ops;
 
-       struct net_device       *loopback_dev;          /* The loopback */
        struct netns_core       core;
        struct netns_mib        mib;
        struct netns_packet     packet;
@@ -171,7 +181,6 @@ struct net {
        struct sock             *crypto_nlsk;
 #endif
        struct sock             *diag_nlsk;
-       atomic_t                fnhe_genid;
 } __randomize_layout;
 
 #include <linux/seq_file_net.h>
@@ -333,7 +342,7 @@ static inline struct net *read_pnet(const possible_net_t *pnet)
 #define __net_initconst        __initconst
 #endif
 
-int peernet2id_alloc(struct net *net, struct net *peer);
+int peernet2id_alloc(struct net *net, struct net *peer, gfp_t gfp);
 int peernet2id(struct net *net, struct net *peer);
 bool peernet_has_id(struct net *net, struct net *peer);
 struct net *get_net_ns_by_id(struct net *net, int id);
index fd178d58fa84e7ae7abdeff5be2ba7b1ec790889..cf8b33213bbc283348fb7470c1ce8fd55cd9d508 100644 (file)
@@ -185,7 +185,7 @@ void reqsk_fastopen_remove(struct sock *sk, struct request_sock *req,
 
 static inline bool reqsk_queue_empty(const struct request_sock_queue *queue)
 {
-       return queue->rskq_accept_head == NULL;
+       return READ_ONCE(queue->rskq_accept_head) == NULL;
 }
 
 static inline struct request_sock *reqsk_queue_remove(struct request_sock_queue *queue,
@@ -197,7 +197,7 @@ static inline struct request_sock *reqsk_queue_remove(struct request_sock_queue
        req = queue->rskq_accept_head;
        if (req) {
                sk_acceptq_removed(parent);
-               queue->rskq_accept_head = req->dl_next;
+               WRITE_ONCE(queue->rskq_accept_head, req->dl_next);
                if (queue->rskq_accept_head == NULL)
                        queue->rskq_accept_tail = NULL;
        }
index 5d60f13d2347b4533b6b4074f920b9abe10fe4a1..3ab5c6bbb90bd56b6856faec41d07f77346c028a 100644 (file)
@@ -610,4 +610,9 @@ static inline __u32 sctp_min_frag_point(struct sctp_sock *sp, __u16 datasize)
        return sctp_mtu_payload(sp, SCTP_DEFAULT_MINSEGMENT, datasize);
 }
 
+static inline bool sctp_newsk_ready(const struct sock *sk)
+{
+       return sock_flag(sk, SOCK_DEAD) || sk->sk_socket;
+}
+
 #endif /* __net_sctp_h__ */
index 2c53f1a1d905409247b1bdafdfaf99d86e430cd0..8f9adcfac41bea7e46062851a25c042261323679 100644 (file)
@@ -878,12 +878,17 @@ static inline bool sk_acceptq_is_full(const struct sock *sk)
  */
 static inline int sk_stream_min_wspace(const struct sock *sk)
 {
-       return sk->sk_wmem_queued >> 1;
+       return READ_ONCE(sk->sk_wmem_queued) >> 1;
 }
 
 static inline int sk_stream_wspace(const struct sock *sk)
 {
-       return sk->sk_sndbuf - sk->sk_wmem_queued;
+       return READ_ONCE(sk->sk_sndbuf) - READ_ONCE(sk->sk_wmem_queued);
+}
+
+static inline void sk_wmem_queued_add(struct sock *sk, int val)
+{
+       WRITE_ONCE(sk->sk_wmem_queued, sk->sk_wmem_queued + val);
 }
 
 void sk_stream_write_space(struct sock *sk);
@@ -949,8 +954,8 @@ static inline void sk_incoming_cpu_update(struct sock *sk)
 {
        int cpu = raw_smp_processor_id();
 
-       if (unlikely(sk->sk_incoming_cpu != cpu))
-               sk->sk_incoming_cpu = cpu;
+       if (unlikely(READ_ONCE(sk->sk_incoming_cpu) != cpu))
+               WRITE_ONCE(sk->sk_incoming_cpu, cpu);
 }
 
 static inline void sock_rps_record_flow_hash(__u32 hash)
@@ -1207,7 +1212,7 @@ static inline void sk_refcnt_debug_release(const struct sock *sk)
 
 static inline bool __sk_stream_memory_free(const struct sock *sk, int wake)
 {
-       if (sk->sk_wmem_queued >= sk->sk_sndbuf)
+       if (READ_ONCE(sk->sk_wmem_queued) >= READ_ONCE(sk->sk_sndbuf))
                return false;
 
        return sk->sk_prot->stream_memory_free ?
@@ -1467,7 +1472,7 @@ DECLARE_STATIC_KEY_FALSE(tcp_tx_skb_cache_key);
 static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
 {
        sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
-       sk->sk_wmem_queued -= skb->truesize;
+       sk_wmem_queued_add(sk, -skb->truesize);
        sk_mem_uncharge(sk, skb->truesize);
        if (static_branch_unlikely(&tcp_tx_skb_cache_key) &&
            !sk->sk_tx_skb_cache && !skb_cloned(skb)) {
@@ -2014,7 +2019,7 @@ static inline int skb_copy_to_page_nocache(struct sock *sk, struct iov_iter *fro
        skb->len             += copy;
        skb->data_len        += copy;
        skb->truesize        += copy;
-       sk->sk_wmem_queued   += copy;
+       sk_wmem_queued_add(sk, copy);
        sk_mem_charge(sk, copy);
        return 0;
 }
@@ -2220,10 +2225,14 @@ static inline void sk_wake_async(const struct sock *sk, int how, int band)
 
 static inline void sk_stream_moderate_sndbuf(struct sock *sk)
 {
-       if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK)) {
-               sk->sk_sndbuf = min(sk->sk_sndbuf, sk->sk_wmem_queued >> 1);
-               sk->sk_sndbuf = max_t(u32, sk->sk_sndbuf, SOCK_MIN_SNDBUF);
-       }
+       u32 val;
+
+       if (sk->sk_userlocks & SOCK_SNDBUF_LOCK)
+               return;
+
+       val = min(sk->sk_sndbuf, sk->sk_wmem_queued >> 1);
+
+       WRITE_ONCE(sk->sk_sndbuf, max_t(u32, val, SOCK_MIN_SNDBUF));
 }
 
 struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
@@ -2233,12 +2242,17 @@ struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
  * sk_page_frag - return an appropriate page_frag
  * @sk: socket
  *
- * If socket allocation mode allows current thread to sleep, it means its
- * safe to use the per task page_frag instead of the per socket one.
+ * Use the per task page_frag instead of the per socket one for
+ * optimization when we know that we're in the normal context and own
+ * everything that's associated with %current.
+ *
+ * gfpflags_allow_blocking() isn't enough here as direct reclaim may nest
+ * inside other socket operations and end up recursing into sk_page_frag()
+ * while it's already in use.
  */
 static inline struct page_frag *sk_page_frag(struct sock *sk)
 {
-       if (gfpflags_allow_blocking(sk->sk_allocation))
+       if (gfpflags_normal_context(sk->sk_allocation))
                return &current->task_frag;
 
        return &sk->sk_frag;
@@ -2251,7 +2265,7 @@ bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag);
  */
 static inline bool sock_writeable(const struct sock *sk)
 {
-       return refcount_read(&sk->sk_wmem_alloc) < (sk->sk_sndbuf >> 1);
+       return refcount_read(&sk->sk_wmem_alloc) < (READ_ONCE(sk->sk_sndbuf) >> 1);
 }
 
 static inline gfp_t gfp_any(void)
@@ -2271,7 +2285,9 @@ static inline long sock_sndtimeo(const struct sock *sk, bool noblock)
 
 static inline int sock_rcvlowat(const struct sock *sk, int waitall, int len)
 {
-       return (waitall ? len : min_t(int, sk->sk_rcvlowat, len)) ? : 1;
+       int v = waitall ? len : min_t(int, READ_ONCE(sk->sk_rcvlowat), len);
+
+       return v ?: 1;
 }
 
 /* Alas, with timeout socket operations are not restartable.
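
sk_wmem_queued_add() funnels every update of sk->sk_wmem_queued through WRITE_ONCE(), matching the READ_ONCE() now used by sk_stream_wspace() and friends, so readers that do not hold the socket lock see consistent values. A hedged sketch of a protocol charging a just-queued skb, mirroring what sk_wmem_free_skb() undoes:

#include <net/sock.h>

/* Illustrative: account a freshly queued skb against the socket. */
static void example_charge_queued_skb(struct sock *sk, struct sk_buff *skb)
{
	sk_wmem_queued_add(sk, skb->truesize);
	sk_mem_charge(sk, skb->truesize);
}
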
index c9a3f9688223b231e5a8c90c2494ad85f3d63cfc..ab4eb5eb5d0705b815e5eec3a772d4776be8653e 100644 (file)
@@ -258,7 +258,7 @@ static inline bool tcp_under_memory_pressure(const struct sock *sk)
            mem_cgroup_under_socket_pressure(sk->sk_memcg))
                return true;
 
-       return tcp_memory_pressure;
+       return READ_ONCE(tcp_memory_pressure);
 }
 /*
  * The next routines deal with comparing 32 bit unsigned ints
@@ -1380,13 +1380,14 @@ static inline int tcp_win_from_space(const struct sock *sk, int space)
 /* Note: caller must be prepared to deal with negative returns */
 static inline int tcp_space(const struct sock *sk)
 {
-       return tcp_win_from_space(sk, sk->sk_rcvbuf - sk->sk_backlog.len -
+       return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf) -
+                                 READ_ONCE(sk->sk_backlog.len) -
                                  atomic_read(&sk->sk_rmem_alloc));
 }
 
 static inline int tcp_full_space(const struct sock *sk)
 {
-       return tcp_win_from_space(sk, sk->sk_rcvbuf);
+       return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf));
 }
 
 extern void tcp_openreq_init_rwin(struct request_sock *req,
@@ -1916,7 +1917,8 @@ static inline u32 tcp_notsent_lowat(const struct tcp_sock *tp)
 static inline bool tcp_stream_memory_free(const struct sock *sk, int wake)
 {
        const struct tcp_sock *tp = tcp_sk(sk);
-       u32 notsent_bytes = tp->write_seq - tp->snd_nxt;
+       u32 notsent_bytes = READ_ONCE(tp->write_seq) -
+                           READ_ONCE(tp->snd_nxt);
 
        return (notsent_bytes << wake) < tcp_notsent_lowat(tp);
 }
index 335283dbe9b3d9ec8528ba6c7f11db63600060b5..373aadcfea21d302b878392bcf851ea93a6c6702 100644 (file)
@@ -197,6 +197,7 @@ struct vxlan_rdst {
        u8                       offloaded:1;
        __be32                   remote_vni;
        u32                      remote_ifindex;
+       struct net_device        *remote_dev;
        struct list_head         list;
        struct rcu_head          rcu;
        struct dst_cache         dst_cache;
index 6a47ba85c54c1109f32db61d700d81a23790474e..e7e733add99f80f68bd7cd4fd545c7322528bf18 100644 (file)
@@ -366,7 +366,7 @@ struct ib_tm_caps {
 
 struct ib_cq_init_attr {
        unsigned int    cqe;
-       int             comp_vector;
+       u32             comp_vector;
        u32             flags;
 };
 
index 3810b340551c35d2a621c7b243dced26dd4d7c28..6bd5ed695a5e8c3aed3869dad7c712bdd3234079 100644 (file)
@@ -32,6 +32,7 @@ extern int scsi_ioctl_reset(struct scsi_device *, int __user *);
 struct scsi_eh_save {
        /* saved state */
        int result;
+       unsigned int resid_len;
        int eh_eflags;
        enum dma_data_direction data_direction;
        unsigned underflow;
index 0fd39295b426b0306e62bd7fd5e837c578f87f1c..057d2a2d0bd05f738936b167aef23a7529f76609 100644 (file)
@@ -264,6 +264,9 @@ enum { SDI0, SDI1, SDI2, SDI3, SDO0, SDO1, SDO2, SDO3 };
 #define AZX_REG_ML_LOUTPAY             0x20
 #define AZX_REG_ML_LINPAY              0x30
 
+/* bit0 is reserved, with BIT(1) mapping to stream1 */
+#define ML_LOSIDV_STREAM_MASK          0xFFFE
+
 #define ML_LCTL_SCF_MASK                       0xF
 #define AZX_MLCTL_SPA                          (0x1 << 16)
 #define AZX_MLCTL_CPA                          (0x1 << 23)
index 985a5f583de4c7f10cf161267cd554999dad4bf8..31f76b6abf712463d4355c529be55e1519ed6dc7 100644 (file)
@@ -135,9 +135,9 @@ int asoc_simple_init_priv(struct asoc_simple_priv *priv,
                               struct link_info *li);
 
 #ifdef DEBUG
-inline void asoc_simple_debug_dai(struct asoc_simple_priv *priv,
-                                 char *name,
-                                 struct asoc_simple_dai *dai)
+static inline void asoc_simple_debug_dai(struct asoc_simple_priv *priv,
+                                        char *name,
+                                        struct asoc_simple_dai *dai)
 {
        struct device *dev = simple_priv_to_dev(priv);
 
@@ -167,7 +167,7 @@ inline void asoc_simple_debug_dai(struct asoc_simple_priv *priv,
                dev_dbg(dev, "%s clk %luHz\n", name, clk_get_rate(dai->clk));
 }
 
-inline void asoc_simple_debug_info(struct asoc_simple_priv *priv)
+static inline void asoc_simple_debug_info(struct asoc_simple_priv *priv)
 {
        struct snd_soc_card *card = simple_priv_to_card(priv);
        struct device *dev = simple_priv_to_dev(priv);
index 5df604de4f11937679c628c967de885cbbf7634f..75ae1899452b9b0272a1934a9f1634998f24ab1a 100644 (file)
@@ -1688,6 +1688,7 @@ TRACE_EVENT(qgroup_update_reserve,
                __entry->qgid           = qgroup->qgroupid;
                __entry->cur_reserved   = qgroup->rsv.values[type];
                __entry->diff           = diff;
+               __entry->type           = type;
        ),
 
        TP_printk_btrfs("qgid=%llu type=%s cur_reserved=%llu diff=%lld",
@@ -1710,6 +1711,7 @@ TRACE_EVENT(qgroup_meta_reserve,
        TP_fast_assign_btrfs(root->fs_info,
                __entry->refroot        = root->root_key.objectid;
                __entry->diff           = diff;
+               __entry->type           = type;
        ),
 
        TP_printk_btrfs("refroot=%llu(%s) type=%s diff=%lld",
@@ -1726,7 +1728,6 @@ TRACE_EVENT(qgroup_meta_convert,
        TP_STRUCT__entry_btrfs(
                __field(        u64,    refroot                 )
                __field(        s64,    diff                    )
-               __field(        int,    type                    )
        ),
 
        TP_fast_assign_btrfs(root->fs_info,
index a13a62db356553552d0f20871075715d854ada2c..191fe447f9908c64bafad0ae55724136c75c08ea 100644 (file)
@@ -519,10 +519,10 @@ TRACE_EVENT(rxrpc_local,
            );
 
 TRACE_EVENT(rxrpc_peer,
-           TP_PROTO(struct rxrpc_peer *peer, enum rxrpc_peer_trace op,
+           TP_PROTO(unsigned int peer_debug_id, enum rxrpc_peer_trace op,
                     int usage, const void *where),
 
-           TP_ARGS(peer, op, usage, where),
+           TP_ARGS(peer_debug_id, op, usage, where),
 
            TP_STRUCT__entry(
                    __field(unsigned int,       peer            )
@@ -532,7 +532,7 @@ TRACE_EVENT(rxrpc_peer,
                             ),
 
            TP_fast_assign(
-                   __entry->peer = peer->debug_id;
+                   __entry->peer = peer_debug_id;
                    __entry->op = op;
                    __entry->usage = usage;
                    __entry->where = where;
@@ -546,10 +546,10 @@ TRACE_EVENT(rxrpc_peer,
            );
 
 TRACE_EVENT(rxrpc_conn,
-           TP_PROTO(struct rxrpc_connection *conn, enum rxrpc_conn_trace op,
+           TP_PROTO(unsigned int conn_debug_id, enum rxrpc_conn_trace op,
                     int usage, const void *where),
 
-           TP_ARGS(conn, op, usage, where),
+           TP_ARGS(conn_debug_id, op, usage, where),
 
            TP_STRUCT__entry(
                    __field(unsigned int,       conn            )
@@ -559,7 +559,7 @@ TRACE_EVENT(rxrpc_conn,
                             ),
 
            TP_fast_assign(
-                   __entry->conn = conn->debug_id;
+                   __entry->conn = conn_debug_id;
                    __entry->op = op;
                    __entry->usage = usage;
                    __entry->where = where;
@@ -606,10 +606,10 @@ TRACE_EVENT(rxrpc_client,
            );
 
 TRACE_EVENT(rxrpc_call,
-           TP_PROTO(struct rxrpc_call *call, enum rxrpc_call_trace op,
+           TP_PROTO(unsigned int call_debug_id, enum rxrpc_call_trace op,
                     int usage, const void *where, const void *aux),
 
-           TP_ARGS(call, op, usage, where, aux),
+           TP_ARGS(call_debug_id, op, usage, where, aux),
 
            TP_STRUCT__entry(
                    __field(unsigned int,               call            )
@@ -620,7 +620,7 @@ TRACE_EVENT(rxrpc_call,
                             ),
 
            TP_fast_assign(
-                   __entry->call = call->debug_id;
+                   __entry->call = call_debug_id;
                    __entry->op = op;
                    __entry->usage = usage;
                    __entry->where = where;
@@ -1068,7 +1068,7 @@ TRACE_EVENT(rxrpc_recvmsg,
                             ),
 
            TP_fast_assign(
-                   __entry->call = call->debug_id;
+                   __entry->call = call ? call->debug_id : 0;
                    __entry->why = why;
                    __entry->seq = seq;
                    __entry->offset = offset;
index a0c4b8a3096604a9817a0f78f58409123a300352..51fe9f6719eb13d872054676078df87e293fcd01 100644 (file)
@@ -82,7 +82,7 @@ TRACE_EVENT(sock_rcvqueue_full,
        TP_fast_assign(
                __entry->rmem_alloc = atomic_read(&sk->sk_rmem_alloc);
                __entry->truesize   = skb->truesize;
-               __entry->sk_rcvbuf  = sk->sk_rcvbuf;
+               __entry->sk_rcvbuf  = READ_ONCE(sk->sk_rcvbuf);
        ),
 
        TP_printk("rmem_alloc=%d truesize=%u sk_rcvbuf=%d",
@@ -115,7 +115,7 @@ TRACE_EVENT(sock_exceed_buf_limit,
                __entry->rmem_alloc = atomic_read(&sk->sk_rmem_alloc);
                __entry->sysctl_wmem = sk_get_wmem0(sk, prot);
                __entry->wmem_alloc = refcount_read(&sk->sk_wmem_alloc);
-               __entry->wmem_queued = sk->sk_wmem_queued;
+               __entry->wmem_queued = READ_ONCE(sk->sk_wmem_queued);
                __entry->kind = kind;
        ),
 
index c99b4f2482c6bf9b80b87ba847700839cbc8bd7f..4fe35d600ab880092c99cdb5fff82cff7a96a89e 100644 (file)
@@ -1003,6 +1003,8 @@ struct drm_amdgpu_info_device {
        __u64 high_va_max;
        /* gfx10 pa_sc_tile_steering_override */
        __u32 pa_sc_tile_steering_override;
+       /* disabled TCCs */
+       __u64 tcc_disabled_mask;
 };
 
 struct drm_amdgpu_info_hw_ip {
index 802b0377a49e55b7ecacc523f10efb5622c6f5f6..373cada8981595269d8142fdc5d331ec2902aceb 100644 (file)
  *
  * Protocol changelog:
  *
+ * 7.1:
+ *  - add the following messages:
+ *      FUSE_SETATTR, FUSE_SYMLINK, FUSE_MKNOD, FUSE_MKDIR, FUSE_UNLINK,
+ *      FUSE_RMDIR, FUSE_RENAME, FUSE_LINK, FUSE_OPEN, FUSE_READ, FUSE_WRITE,
+ *      FUSE_RELEASE, FUSE_FSYNC, FUSE_FLUSH, FUSE_SETXATTR, FUSE_GETXATTR,
+ *      FUSE_LISTXATTR, FUSE_REMOVEXATTR, FUSE_OPENDIR, FUSE_READDIR,
+ *      FUSE_RELEASEDIR
+ *  - add padding to messages to accommodate 32-bit servers on 64-bit kernels
+ *
+ * 7.2:
+ *  - add FOPEN_DIRECT_IO and FOPEN_KEEP_CACHE flags
+ *  - add FUSE_FSYNCDIR message
+ *
+ * 7.3:
+ *  - add FUSE_ACCESS message
+ *  - add FUSE_CREATE message
+ *  - add filehandle to fuse_setattr_in
+ *
+ * 7.4:
+ *  - add frsize to fuse_kstatfs
+ *  - clean up request size limit checking
+ *
+ * 7.5:
+ *  - add flags and max_write to fuse_init_out
+ *
+ * 7.6:
+ *  - add max_readahead to fuse_init_in and fuse_init_out
+ *
+ * 7.7:
+ *  - add FUSE_INTERRUPT message
+ *  - add POSIX file lock support
+ *
+ * 7.8:
+ *  - add lock_owner and flags fields to fuse_release_in
+ *  - add FUSE_BMAP message
+ *  - add FUSE_DESTROY message
+ *
  * 7.9:
  *  - new fuse_getattr_in input argument of GETATTR
  *  - add lk_flags in fuse_lk_in
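
The changelog above records the protocol minor version at which each message and flag became available. A userspace filesystem speaking the raw /dev/fuse protocol typically gates optional features on the minor version the kernel advertises during FUSE_INIT. The sketch below is illustrative only: the server_features structure and negotiate_features() helper are made up for this example and are not part of the header.

#include <linux/fuse.h>
#include <stdbool.h>

/* Illustrative only: pick optional features after the FUSE_INIT exchange,
 * based on the protocol minor the kernel advertised in fuse_init_in and
 * the minor this server was built against. */
struct server_features {
        bool fsyncdir;          /* FUSE_FSYNCDIR exists since 7.2 */
        bool interrupt;         /* FUSE_INTERRUPT exists since 7.7 */
        bool destroy;           /* FUSE_DESTROY exists since 7.8  */
};

static void negotiate_features(const struct fuse_init_in *in,
                               struct server_features *f)
{
        /* Effective protocol level is the lower of what the kernel offers
         * and what this server supports. */
        unsigned int minor = in->minor < FUSE_KERNEL_MINOR_VERSION ?
                             in->minor : FUSE_KERNEL_MINOR_VERSION;

        f->fsyncdir  = minor >= 2;
        f->interrupt = minor >= 7;
        f->destroy   = minor >= 8;
}
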
index 1c215ea1798e6039f3c202faf80e8d98f82ed155..e168dc59e9a0d6c4b8b79242848c7db0a0b93337 100644 (file)
@@ -45,6 +45,27 @@ struct nvme_passthru_cmd {
        __u32   result;
 };
 
+struct nvme_passthru_cmd64 {
+       __u8    opcode;
+       __u8    flags;
+       __u16   rsvd1;
+       __u32   nsid;
+       __u32   cdw2;
+       __u32   cdw3;
+       __u64   metadata;
+       __u64   addr;
+       __u32   metadata_len;
+       __u32   data_len;
+       __u32   cdw10;
+       __u32   cdw11;
+       __u32   cdw12;
+       __u32   cdw13;
+       __u32   cdw14;
+       __u32   cdw15;
+       __u32   timeout_ms;
+       __u64   result;
+};
+
 #define nvme_admin_cmd nvme_passthru_cmd
 
 #define NVME_IOCTL_ID          _IO('N', 0x40)
@@ -54,5 +75,7 @@ struct nvme_passthru_cmd {
 #define NVME_IOCTL_RESET       _IO('N', 0x44)
 #define NVME_IOCTL_SUBSYS_RESET        _IO('N', 0x45)
 #define NVME_IOCTL_RESCAN      _IO('N', 0x46)
+#define NVME_IOCTL_ADMIN64_CMD _IOWR('N', 0x47, struct nvme_passthru_cmd64)
+#define NVME_IOCTL_IO64_CMD    _IOWR('N', 0x48, struct nvme_passthru_cmd64)
 
 #endif /* _UAPI_LINUX_NVME_IOCTL_H */
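
struct nvme_passthru_cmd64 mirrors the existing nvme_passthru_cmd but widens result to 64 bits, and the two new ioctls accept it on the admin and I/O paths. A hedged userspace sketch issuing an Identify Controller admin command through the new interface (the device node, buffer size and the opcode/CNS constants 0x06/1 come from the NVMe spec and are illustrative, not from this header):

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/nvme_ioctl.h>

int main(void)
{
        void *id = NULL;
        int fd, ret;

        fd = open("/dev/nvme0", O_RDONLY);      /* controller char device */
        if (fd < 0) {
                perror("open");
                return 1;
        }
        if (posix_memalign(&id, 4096, 4096))
                return 1;
        memset(id, 0, 4096);

        struct nvme_passthru_cmd64 cmd = {
                .opcode   = 0x06,               /* Identify */
                .addr     = (uintptr_t)id,
                .data_len = 4096,
                .cdw10    = 1,                  /* CNS=1: Identify Controller */
        };

        ret = ioctl(fd, NVME_IOCTL_ADMIN64_CMD, &cmd);
        if (ret < 0)
                perror("NVME_IOCTL_ADMIN64_CMD");
        else
                printf("status=%d result=0x%llx\n",
                       ret, (unsigned long long)cmd.result);

        free(id);
        close(fd);
        return ret != 0;
}

On success the non-negative ioctl() return value is the NVMe completion status, and the full 64-bit completion result (rather than only the lower 32 bits) comes back in cmd.result.
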
index 364c350e85cd346d33ce139c51307595d132c94f..62b6f69bd9fb0c196732d4574f7b6dd764063d43 100644 (file)
@@ -35,6 +35,9 @@
 
 */
 
+#ifndef _UAPI_LINUX_PG_H
+#define _UAPI_LINUX_PG_H
+
 #define PG_MAGIC       'P'
 #define PG_RESET       'Z'
 #define PG_COMMAND     'C'
@@ -61,4 +64,4 @@ struct pg_read_hdr {
 
 };
 
-/* end of pg.h */
+#endif /* _UAPI_LINUX_PG_H */
index b3105ac1381a8d8651f7f2fbe6b700bd745ead01..25b4fa00bad1f4ccc22f7dfe708a95807415b151 100644 (file)
 #define CLONE_NEWNET           0x40000000      /* New network namespace */
 #define CLONE_IO               0x80000000      /* Clone io context */
 
-/*
- * Arguments for the clone3 syscall
+#ifndef __ASSEMBLY__
+/**
+ * struct clone_args - arguments for the clone3 syscall
+ * @flags:       Flags for the new process as listed above.
+ *               All flags are valid except for CSIGNAL and
+ *               CLONE_DETACHED.
+ * @pidfd:       If CLONE_PIDFD is set, a pidfd will be
+ *               returned in this argument.
+ * @child_tid:   If CLONE_CHILD_SETTID is set, the TID of the
+ *               child process will be returned in the child's
+ *               memory.
+ * @parent_tid:  If CLONE_PARENT_SETTID is set, the TID of
+ *               the child process will be returned in the
+ *               parent's memory.
+ * @exit_signal: The exit_signal the parent process will be
+ *               sent when the child exits.
+ * @stack:       Specify the location of the stack for the
+ *               child process.
+ *               Note, @stack is expected to point to the
+ *               lowest address. The stack direction will be
+ *               determined by the kernel and set up
+ *               appropriately based on @stack_size.
+ * @stack_size:  The size of the stack for the child process.
+ * @tls:         If CLONE_SETTLS is set, the tls descriptor
+ *               is set to tls.
+ *
+ * The structure is versioned by size and thus extensible.
+ * New struct members must go at the end of the struct and
+ * must be properly 64bit aligned.
  */
 struct clone_args {
        __aligned_u64 flags;
@@ -46,6 +73,9 @@ struct clone_args {
        __aligned_u64 stack_size;
        __aligned_u64 tls;
 };
+#endif
+
+#define CLONE_ARGS_SIZE_VER0 64 /* sizeof first published struct */
 
 /*
  * Scheduling policies
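
The structure above is consumed by clone3() and is versioned purely by the size argument, which must be at least CLONE_ARGS_SIZE_VER0; any trailing bytes the kernel does not know about must be zero. A minimal hedged sketch of a fork-style call from userspace (raw syscall, since libc had no wrapper at the time; __NR_clone3 and struct clone_args need 5.3+ kernel headers):

#define _GNU_SOURCE
#include <linux/sched.h>        /* struct clone_args, CLONE_ARGS_SIZE_VER0 */
#include <signal.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
        /* Fork-like use: no CLONE_VM, so no new stack is needed and both
         * stack and stack_size stay zero (clone3_stack_valid() accepts
         * that).  For thread-style use (CLONE_VM) a stack must be given
         * and, per the kerneldoc above, it points at the lowest address;
         * the kernel derives the growth direction from stack_size. */
        struct clone_args args = {
                .flags       = 0,
                .exit_signal = SIGCHLD,
        };

        /* clone3() is versioned by size: pass the sizeof() this program
         * was built against (>= CLONE_ARGS_SIZE_VER0). */
        long pid = syscall(__NR_clone3, &args, sizeof(args));
        if (pid < 0) {
                perror("clone3");
                return 1;
        }
        if (pid == 0)
                _exit(0);       /* child */

        waitpid(pid, NULL, 0);
        return 0;
}
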
index 0f4f87a6fd54d7e18b2f6d9c73a0ff0cce38afd4..e7fe550b6038afeda29b4d8f29f8386f142ae46d 100644 (file)
 #define PORT_SUNIX     121
 
 /* Freescale Linflex UART */
-#define PORT_LINFLEXUART       121
+#define PORT_LINFLEXUART       122
 
 #endif /* _UAPILINUX_SERIAL_CORE_H */
index 98b30c1613b28031c27a35990510a2bafc0697c4..d89969aa9942ce39df52674bb911600ab16eac48 100644 (file)
@@ -212,30 +212,7 @@ int xen_xlate_map_ballooned_pages(xen_pfn_t **pfns, void **vaddr,
 
 bool xen_running_on_version_or_later(unsigned int major, unsigned int minor);
 
-efi_status_t xen_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc);
-efi_status_t xen_efi_set_time(efi_time_t *tm);
-efi_status_t xen_efi_get_wakeup_time(efi_bool_t *enabled, efi_bool_t *pending,
-                                    efi_time_t *tm);
-efi_status_t xen_efi_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm);
-efi_status_t xen_efi_get_variable(efi_char16_t *name, efi_guid_t *vendor,
-                                 u32 *attr, unsigned long *data_size,
-                                 void *data);
-efi_status_t xen_efi_get_next_variable(unsigned long *name_size,
-                                      efi_char16_t *name, efi_guid_t *vendor);
-efi_status_t xen_efi_set_variable(efi_char16_t *name, efi_guid_t *vendor,
-                                 u32 attr, unsigned long data_size,
-                                 void *data);
-efi_status_t xen_efi_query_variable_info(u32 attr, u64 *storage_space,
-                                        u64 *remaining_space,
-                                        u64 *max_variable_size);
-efi_status_t xen_efi_get_next_high_mono_count(u32 *count);
-efi_status_t xen_efi_update_capsule(efi_capsule_header_t **capsules,
-                                   unsigned long count, unsigned long sg_list);
-efi_status_t xen_efi_query_capsule_caps(efi_capsule_header_t **capsules,
-                                       unsigned long count, u64 *max_size,
-                                       int *reset_type);
-void xen_efi_reset_system(int reset_type, efi_status_t status,
-                         unsigned long data_size, efi_char16_t *data);
+void xen_efi_runtime_setup(void);
 
 
 #ifdef CONFIG_PREEMPT
index 66088a9e9b9e220b9b823999d4cc9b022e203c5f..ef0e1e3e66f4a50308819a332812fa5e88488afe 100644 (file)
@@ -502,7 +502,7 @@ int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt)
        return WARN_ON_ONCE(bpf_adj_branches(prog, off, off + cnt, off, false));
 }
 
-void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp)
+static void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp)
 {
        int i;
 
index d27f3b60ff6d30a753b7b92b9126629eaab2bf4d..3867864cdc2fbb833cd063f2ee05f754a376b0a6 100644 (file)
@@ -128,7 +128,7 @@ static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr)
 
                if (!dtab->n_buckets) /* Overflow check */
                        return -EINVAL;
-               cost += sizeof(struct hlist_head) * dtab->n_buckets;
+               cost += (u64) sizeof(struct hlist_head) * dtab->n_buckets;
        }
 
        /* if map size is larger than memlock limit, reject it */
@@ -719,6 +719,32 @@ const struct bpf_map_ops dev_map_hash_ops = {
        .map_check_btf = map_check_no_btf,
 };
 
+static void dev_map_hash_remove_netdev(struct bpf_dtab *dtab,
+                                      struct net_device *netdev)
+{
+       unsigned long flags;
+       u32 i;
+
+       spin_lock_irqsave(&dtab->index_lock, flags);
+       for (i = 0; i < dtab->n_buckets; i++) {
+               struct bpf_dtab_netdev *dev;
+               struct hlist_head *head;
+               struct hlist_node *next;
+
+               head = dev_map_index_hash(dtab, i);
+
+               hlist_for_each_entry_safe(dev, next, head, index_hlist) {
+                       if (netdev != dev->dev)
+                               continue;
+
+                       dtab->items--;
+                       hlist_del_rcu(&dev->index_hlist);
+                       call_rcu(&dev->rcu, __dev_map_entry_free);
+               }
+       }
+       spin_unlock_irqrestore(&dtab->index_lock, flags);
+}
+
 static int dev_map_notification(struct notifier_block *notifier,
                                ulong event, void *ptr)
 {
@@ -735,6 +761,11 @@ static int dev_map_notification(struct notifier_block *notifier,
                 */
                rcu_read_lock();
                list_for_each_entry_rcu(dtab, &dev_map_list, list) {
+                       if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
+                               dev_map_hash_remove_netdev(dtab, netdev);
+                               continue;
+                       }
+
                        for (i = 0; i < dtab->map.max_entries; i++) {
                                struct bpf_dtab_netdev *dev, *odev;
 
index 82eabd4e38adda6e95f8113d9a7f9bb61b45ab00..0937719b87e2665219c064b0dc886809a9ef8f50 100644 (file)
@@ -1326,24 +1326,32 @@ static void __bpf_prog_put_rcu(struct rcu_head *rcu)
 {
        struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);
 
+       kvfree(aux->func_info);
        free_used_maps(aux);
        bpf_prog_uncharge_memlock(aux->prog);
        security_bpf_prog_free(aux);
        bpf_prog_free(aux->prog);
 }
 
+static void __bpf_prog_put_noref(struct bpf_prog *prog, bool deferred)
+{
+       bpf_prog_kallsyms_del_all(prog);
+       btf_put(prog->aux->btf);
+       bpf_prog_free_linfo(prog);
+
+       if (deferred)
+               call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
+       else
+               __bpf_prog_put_rcu(&prog->aux->rcu);
+}
+
 static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock)
 {
        if (atomic_dec_and_test(&prog->aux->refcnt)) {
                perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_UNLOAD, 0);
                /* bpf_prog_free_id() must be called first */
                bpf_prog_free_id(prog, do_idr_lock);
-               bpf_prog_kallsyms_del_all(prog);
-               btf_put(prog->aux->btf);
-               kvfree(prog->aux->func_info);
-               bpf_prog_free_linfo(prog);
-
-               call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
+               __bpf_prog_put_noref(prog, true);
        }
 }
 
@@ -1741,11 +1749,12 @@ static int bpf_prog_load(union bpf_attr *attr, union bpf_attr __user *uattr)
        return err;
 
 free_used_maps:
-       bpf_prog_free_linfo(prog);
-       kvfree(prog->aux->func_info);
-       btf_put(prog->aux->btf);
-       bpf_prog_kallsyms_del_subprogs(prog);
-       free_used_maps(prog->aux);
+       /* In case we have subprogs, we need to wait for a grace
+        * period before we can tear down JIT memory since symbols
+        * are already exposed under kallsyms.
+        */
+       __bpf_prog_put_noref(prog, prog->aux->func_cnt);
+       return err;
 free_prog:
        bpf_prog_uncharge_memlock(prog);
 free_prog_sec:
index c52bc91f882b29e44ea8eb56fd0ba9889c6a86df..c87ee6412b36aa8f088844c42abc2b8aa70d2749 100644 (file)
@@ -798,7 +798,8 @@ static int generate_sched_domains(cpumask_var_t **domains,
                    cpumask_subset(cp->cpus_allowed, top_cpuset.effective_cpus))
                        continue;
 
-               if (is_sched_load_balance(cp))
+               if (is_sched_load_balance(cp) &&
+                   !cpumask_empty(cp->effective_cpus))
                        csa[csn++] = cp;
 
                /* skip @cp's subtree if not a partition root */
index ca4e5d44b5712ddd2b8f4083dac773fa7ed12637..c00b9258fa6abaa77aecbd6a6f53a7e75dc00d93 100644 (file)
@@ -87,9 +87,9 @@ void *dma_common_contiguous_remap(struct page *page, size_t size,
  */
 void dma_common_free_remap(void *cpu_addr, size_t size)
 {
-       struct page **pages = dma_common_find_pages(cpu_addr);
+       struct vm_struct *area = find_vm_area(cpu_addr);
 
-       if (!pages) {
+       if (!area || area->flags != VM_DMA_COHERENT) {
                WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
                return;
        }
index 4655adbbae10d09eabfe155744b4e8539c507494..aec8dba2bea40652c9a5128a8fc8956f7a27f30c 100644 (file)
@@ -3779,11 +3779,23 @@ static void rotate_ctx(struct perf_event_context *ctx, struct perf_event *event)
        perf_event_groups_insert(&ctx->flexible_groups, event);
 }
 
+/* pick an event from the flexible_groups to rotate */
 static inline struct perf_event *
-ctx_first_active(struct perf_event_context *ctx)
+ctx_event_to_rotate(struct perf_event_context *ctx)
 {
-       return list_first_entry_or_null(&ctx->flexible_active,
-                                       struct perf_event, active_list);
+       struct perf_event *event;
+
+       /* pick the first active flexible event */
+       event = list_first_entry_or_null(&ctx->flexible_active,
+                                        struct perf_event, active_list);
+
+       /* if no active flexible event, pick the first event */
+       if (!event) {
+               event = rb_entry_safe(rb_first(&ctx->flexible_groups.tree),
+                                     typeof(*event), group_node);
+       }
+
+       return event;
 }
 
 static bool perf_rotate_context(struct perf_cpu_context *cpuctx)
@@ -3808,9 +3820,9 @@ static bool perf_rotate_context(struct perf_cpu_context *cpuctx)
        perf_pmu_disable(cpuctx->ctx.pmu);
 
        if (task_rotate)
-               task_event = ctx_first_active(task_ctx);
+               task_event = ctx_event_to_rotate(task_ctx);
        if (cpu_rotate)
-               cpu_event = ctx_first_active(&cpuctx->ctx);
+               cpu_event = ctx_event_to_rotate(&cpuctx->ctx);
 
        /*
         * As per the order given at ctx_resched() first 'pop' task flexible
@@ -5595,8 +5607,10 @@ static void perf_mmap_close(struct vm_area_struct *vma)
                perf_pmu_output_stop(event);
 
                /* now it's safe to free the pages */
-               atomic_long_sub(rb->aux_nr_pages, &mmap_user->locked_vm);
-               atomic64_sub(rb->aux_mmap_locked, &vma->vm_mm->pinned_vm);
+               if (!rb->aux_mmap_locked)
+                       atomic_long_sub(rb->aux_nr_pages, &mmap_user->locked_vm);
+               else
+                       atomic64_sub(rb->aux_mmap_locked, &vma->vm_mm->pinned_vm);
 
                /* this has to be the last one */
                rb_free_aux(rb);
@@ -5668,7 +5682,8 @@ again:
         * undo the VM accounting.
         */
 
-       atomic_long_sub((size >> PAGE_SHIFT) + 1, &mmap_user->locked_vm);
+       atomic_long_sub((size >> PAGE_SHIFT) + 1 - mmap_locked,
+                       &mmap_user->locked_vm);
        atomic64_sub(mmap_locked, &vma->vm_mm->pinned_vm);
        free_uid(mmap_user);
 
@@ -5812,8 +5827,20 @@ accounting:
 
        user_locked = atomic_long_read(&user->locked_vm) + user_extra;
 
-       if (user_locked > user_lock_limit)
+       if (user_locked <= user_lock_limit) {
+               /* charge all to locked_vm */
+       } else if (atomic_long_read(&user->locked_vm) >= user_lock_limit) {
+               /* charge all to pinned_vm */
+               extra = user_extra;
+               user_extra = 0;
+       } else {
+               /*
+                * charge locked_vm until it hits user_lock_limit;
+                * charge the rest from pinned_vm
+                */
                extra = user_locked - user_lock_limit;
+               user_extra -= extra;
+       }
 
        lock_limit = rlimit(RLIMIT_MEMLOCK);
        lock_limit >>= PAGE_SHIFT;
@@ -6922,7 +6949,7 @@ static void __perf_event_output_stop(struct perf_event *event, void *data)
 static int __perf_pmu_output_stop(void *info)
 {
        struct perf_event *event = info;
-       struct pmu *pmu = event->pmu;
+       struct pmu *pmu = event->ctx->pmu;
        struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
        struct remote_output ro = {
                .rb     = event->rb,
@@ -10586,58 +10613,29 @@ static int perf_copy_attr(struct perf_event_attr __user *uattr,
        u32 size;
        int ret;
 
-       if (!access_ok(uattr, PERF_ATTR_SIZE_VER0))
-               return -EFAULT;
-
-       /*
-        * zero the full structure, so that a short copy will be nice.
-        */
+       /* Zero the full structure, so that a short copy will be nice. */
        memset(attr, 0, sizeof(*attr));
 
        ret = get_user(size, &uattr->size);
        if (ret)
                return ret;
 
-       if (size > PAGE_SIZE)   /* silly large */
-               goto err_size;
-
-       if (!size)              /* abi compat */
+       /* ABI compatibility quirk: */
+       if (!size)
                size = PERF_ATTR_SIZE_VER0;
-
-       if (size < PERF_ATTR_SIZE_VER0)
+       if (size < PERF_ATTR_SIZE_VER0 || size > PAGE_SIZE)
                goto err_size;
 
-       /*
-        * If we're handed a bigger struct than we know of,
-        * ensure all the unknown bits are 0 - i.e. new
-        * user-space does not rely on any kernel feature
-        * extensions we dont know about yet.
-        */
-       if (size > sizeof(*attr)) {
-               unsigned char __user *addr;
-               unsigned char __user *end;
-               unsigned char val;
-
-               addr = (void __user *)uattr + sizeof(*attr);
-               end  = (void __user *)uattr + size;
-
-               for (; addr < end; addr++) {
-                       ret = get_user(val, addr);
-                       if (ret)
-                               return ret;
-                       if (val)
-                               goto err_size;
-               }
-               size = sizeof(*attr);
+       ret = copy_struct_from_user(attr, sizeof(*attr), uattr, size);
+       if (ret) {
+               if (ret == -E2BIG)
+                       goto err_size;
+               return ret;
        }
 
-       ret = copy_from_user(attr, uattr, size);
-       if (ret)
-               return -EFAULT;
-
        attr->size = size;
 
-       if (attr->__reserved_1)
+       if (attr->__reserved_1 || attr->__reserved_2)
                return -EINVAL;
 
        if (attr->sample_type & ~(PERF_SAMPLE_MAX-1))
@@ -11891,6 +11889,10 @@ static int inherit_group(struct perf_event *parent_event,
                                            child, leader, child_ctx);
                if (IS_ERR(child_ctr))
                        return PTR_ERR(child_ctr);
+
+               if (sub->aux_event == parent_event &&
+                   !perf_get_aux_event(child_ctr, leader))
+                       return -EINVAL;
        }
        return 0;
 }
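
The perf_copy_attr() hunk above, and the sched_copy_attr() and copy_clone_args_from_user() hunks later in this series, all replace the same open-coded pattern (probe the tail of an oversized userspace struct for non-zero bytes, then copy_from_user()) with the copy_struct_from_user() helper. Its contract: copy the smaller of the kernel and userspace sizes, fail with -E2BIG if userspace passed a larger struct whose extra bytes are not all zero, and zero-fill the kernel fields an older userspace does not know about. A simplified model of those semantics (the real helper lives in include/linux/uaccess.h and follows the same logic):

/* Simplified model of copy_struct_from_user() semantics. */
static int copy_struct_from_user_model(void *dst, size_t ksize,
                                       const void __user *src, size_t usize)
{
        size_t size = ksize < usize ? ksize : usize;
        int ret;

        /* Newer userspace on an older kernel: the bytes beyond what the
         * kernel understands must all be zero, otherwise -E2BIG. */
        if (usize > ksize) {
                ret = check_zeroed_user(src + ksize, usize - ksize);
                if (ret < 0)
                        return ret;
                if (ret == 0)
                        return -E2BIG;
        }
        /* Older userspace on a newer kernel: zero-fill the fields it lacks. */
        if (usize < ksize)
                memset(dst + usize, 0, ksize - usize);

        if (copy_from_user(dst, src, size))
                return -EFAULT;
        return 0;
}
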
index 94d38a39d72ecaf3ad0cbee26f5aa9fa1e26fb0e..c74761004ee546272ebbbe56bb2fb62fd4148b0f 100644 (file)
@@ -474,14 +474,17 @@ int uprobe_write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm,
        struct vm_area_struct *vma;
        int ret, is_register, ref_ctr_updated = 0;
        bool orig_page_huge = false;
+       unsigned int gup_flags = FOLL_FORCE;
 
        is_register = is_swbp_insn(&opcode);
        uprobe = container_of(auprobe, struct uprobe, arch);
 
 retry:
+       if (is_register)
+               gup_flags |= FOLL_SPLIT_PMD;
        /* Read the page with vaddr into memory */
-       ret = get_user_pages_remote(NULL, mm, vaddr, 1,
-                       FOLL_FORCE | FOLL_SPLIT_PMD, &old_page, &vma, NULL);
+       ret = get_user_pages_remote(NULL, mm, vaddr, 1, gup_flags,
+                                   &old_page, &vma, NULL);
        if (ret <= 0)
                return ret;
 
@@ -489,6 +492,12 @@ retry:
        if (ret <= 0)
                goto put_old;
 
+       if (WARN(!is_register && PageCompound(old_page),
+                "uprobe unregister should never work on compound page\n")) {
+               ret = -EINVAL;
+               goto put_old;
+       }
+
        /* We are going to replace instruction, update ref_ctr. */
        if (!ref_ctr_updated && uprobe->ref_ctr_offset) {
                ret = update_ref_ctr(uprobe, mm, is_register ? 1 : -1);
index f9572f416126283dd2e8ac6ce3bfd66899e58016..55af6931c6ec3f8b817836c7fc78d56638f7de8c 100644 (file)
@@ -2525,39 +2525,19 @@ SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
 #ifdef __ARCH_WANT_SYS_CLONE3
 noinline static int copy_clone_args_from_user(struct kernel_clone_args *kargs,
                                              struct clone_args __user *uargs,
-                                             size_t size)
+                                             size_t usize)
 {
+       int err;
        struct clone_args args;
 
-       if (unlikely(size > PAGE_SIZE))
+       if (unlikely(usize > PAGE_SIZE))
                return -E2BIG;
-
-       if (unlikely(size < sizeof(struct clone_args)))
+       if (unlikely(usize < CLONE_ARGS_SIZE_VER0))
                return -EINVAL;
 
-       if (unlikely(!access_ok(uargs, size)))
-               return -EFAULT;
-
-       if (size > sizeof(struct clone_args)) {
-               unsigned char __user *addr;
-               unsigned char __user *end;
-               unsigned char val;
-
-               addr = (void __user *)uargs + sizeof(struct clone_args);
-               end = (void __user *)uargs + size;
-
-               for (; addr < end; addr++) {
-                       if (get_user(val, addr))
-                               return -EFAULT;
-                       if (val)
-                               return -E2BIG;
-               }
-
-               size = sizeof(struct clone_args);
-       }
-
-       if (copy_from_user(&args, uargs, size))
-               return -EFAULT;
+       err = copy_struct_from_user(&args, sizeof(args), uargs, usize);
+       if (err)
+               return err;
 
        /*
         * Verify that higher 32bits of exit_signal are unset and that
@@ -2581,7 +2561,35 @@ noinline static int copy_clone_args_from_user(struct kernel_clone_args *kargs,
        return 0;
 }
 
-static bool clone3_args_valid(const struct kernel_clone_args *kargs)
+/**
+ * clone3_stack_valid - check and prepare stack
+ * @kargs: kernel clone args
+ *
+ * Verify that the stack arguments userspace gave us are sane.
+ * In addition, set the stack direction for userspace since it's easy for us to
+ * determine.
+ */
+static inline bool clone3_stack_valid(struct kernel_clone_args *kargs)
+{
+       if (kargs->stack == 0) {
+               if (kargs->stack_size > 0)
+                       return false;
+       } else {
+               if (kargs->stack_size == 0)
+                       return false;
+
+               if (!access_ok((void __user *)kargs->stack, kargs->stack_size))
+                       return false;
+
+#if !defined(CONFIG_STACK_GROWSUP) && !defined(CONFIG_IA64)
+               kargs->stack += kargs->stack_size;
+#endif
+       }
+
+       return true;
+}
+
+static bool clone3_args_valid(struct kernel_clone_args *kargs)
 {
        /*
         * All lower bits of the flag word are taken.
@@ -2601,9 +2609,23 @@ static bool clone3_args_valid(const struct kernel_clone_args *kargs)
            kargs->exit_signal)
                return false;
 
+       if (!clone3_stack_valid(kargs))
+               return false;
+
        return true;
 }
 
+/**
+ * clone3 - create a new process with specific properties
+ * @uargs: argument structure
+ * @size:  size of @uargs
+ *
+ * clone3() is the extensible successor to clone()/clone2().
+ * It takes a struct as argument that is versioned by its size.
+ *
+ * Return: On success, a positive PID for the child process.
+ *         On error, a negative errno number.
+ */
 SYSCALL_DEFINE2(clone3, struct clone_args __user *, uargs, size_t, size)
 {
        int err;
@@ -2934,7 +2956,7 @@ int sysctl_max_threads(struct ctl_table *table, int write,
        struct ctl_table t;
        int ret;
        int threads = max_threads;
-       int min = MIN_THREADS;
+       int min = 1;
        int max = MAX_THREADS;
 
        t = *table;
@@ -2946,7 +2968,7 @@ int sysctl_max_threads(struct ctl_table *table, int write,
        if (ret || !write)
                return ret;
 
-       set_max_threads(threads);
+       max_threads = threads;
 
        return 0;
 }
index c0738424bb43331bc96c787da8f3ec3d678053d8..dc520f01f99ddc053366ab5fdbaffd126f519c24 100644 (file)
@@ -22,12 +22,6 @@ EXPORT_SYMBOL(system_freezing_cnt);
 bool pm_freezing;
 bool pm_nosig_freezing;
 
-/*
- * Temporary export for the deadlock workaround in ata_scsi_hotplug().
- * Remove once the hack becomes unnecessary.
- */
-EXPORT_SYMBOL_GPL(pm_freezing);
-
 /* protects freezing and frozen transitions */
 static DEFINE_SPINLOCK(freezer_lock);
 
index 9ff449888d9cda491a39b0ffeba27c31d47a72b3..5a0fc0b0403a6c46cd41a0b73a07846eb118a016 100755 (executable)
@@ -71,7 +71,13 @@ done | cpio --quiet -pd $cpio_dir >/dev/null 2>&1
 find $cpio_dir -type f -print0 |
        xargs -0 -P8 -n1 perl -pi -e 'BEGIN {undef $/;}; s/\/\*((?!SPDX).)*?\*\///smg;'
 
-tar -Jcf $tarfile -C $cpio_dir/ . > /dev/null
+# Create archive and try to normalize metadata for reproducibility.
+# For compatibility with older versions of tar, files are fed to tar
+# pre-sorted, as --sort=name might not be available.
+find $cpio_dir -printf "./%P\n" | LC_ALL=C sort | \
+    tar "${KBUILD_BUILD_TIMESTAMP:+--mtime=$KBUILD_BUILD_TIMESTAMP}" \
+    --owner=0 --group=0 --numeric-owner --no-recursion \
+    -Jcf $tarfile -C $cpio_dir/ -T - > /dev/null
 
 echo "$src_files_md5" >  kernel/kheaders.md5
 echo "$obj_files_md5" >> kernel/kheaders.md5
index 621467c33fefda0e580572cbfa865d7472872f75..b262f47046ca4feb66f61dc802a8a45bd15c222b 100644 (file)
@@ -866,9 +866,9 @@ void kthread_delayed_work_timer_fn(struct timer_list *t)
 }
 EXPORT_SYMBOL(kthread_delayed_work_timer_fn);
 
-void __kthread_queue_delayed_work(struct kthread_worker *worker,
-                                 struct kthread_delayed_work *dwork,
-                                 unsigned long delay)
+static void __kthread_queue_delayed_work(struct kthread_worker *worker,
+                                        struct kthread_delayed_work *dwork,
+                                        unsigned long delay)
 {
        struct timer_list *timer = &dwork->timer;
        struct kthread_work *work = &dwork->work;
index 47e8ebccc22be40478e2581616456f5749b7c4a9..f470a038b05bd1111bc3e0dd2195d6b15e823d14 100644 (file)
@@ -180,6 +180,7 @@ void panic(const char *fmt, ...)
         * after setting panic_cpu) from invoking panic() again.
         */
        local_irq_disable();
+       preempt_disable_notrace();
 
        /*
         * It's possible to come here directly from a panic-assertion and
index e8710d179b35beace0e7d48f1a2fd7af93e09ddf..e26de7af520beba281df4e42f11f06b150a0fab7 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/seq_file.h>
 #include <linux/suspend.h>
 #include <linux/syscalls.h>
+#include <linux/pm_runtime.h>
 
 #include "power.h"
 
index 9568a2fe7c116155f6c6c2c4a3220cd58f78a32b..04e83fdfbe80c016d986adae9fe2fd36bc2ca8d7 100644 (file)
@@ -650,3 +650,243 @@ static int __init pm_qos_power_init(void)
 }
 
 late_initcall(pm_qos_power_init);
+
+/* Definitions related to the frequency QoS below. */
+
+/**
+ * freq_constraints_init - Initialize frequency QoS constraints.
+ * @qos: Frequency QoS constraints to initialize.
+ */
+void freq_constraints_init(struct freq_constraints *qos)
+{
+       struct pm_qos_constraints *c;
+
+       c = &qos->min_freq;
+       plist_head_init(&c->list);
+       c->target_value = FREQ_QOS_MIN_DEFAULT_VALUE;
+       c->default_value = FREQ_QOS_MIN_DEFAULT_VALUE;
+       c->no_constraint_value = FREQ_QOS_MIN_DEFAULT_VALUE;
+       c->type = PM_QOS_MAX;
+       c->notifiers = &qos->min_freq_notifiers;
+       BLOCKING_INIT_NOTIFIER_HEAD(c->notifiers);
+
+       c = &qos->max_freq;
+       plist_head_init(&c->list);
+       c->target_value = FREQ_QOS_MAX_DEFAULT_VALUE;
+       c->default_value = FREQ_QOS_MAX_DEFAULT_VALUE;
+       c->no_constraint_value = FREQ_QOS_MAX_DEFAULT_VALUE;
+       c->type = PM_QOS_MIN;
+       c->notifiers = &qos->max_freq_notifiers;
+       BLOCKING_INIT_NOTIFIER_HEAD(c->notifiers);
+}
+
+/**
+ * freq_qos_read_value - Get frequency QoS constraint for a given list.
+ * @qos: Constraints to evaluate.
+ * @type: QoS request type.
+ */
+s32 freq_qos_read_value(struct freq_constraints *qos,
+                       enum freq_qos_req_type type)
+{
+       s32 ret;
+
+       switch (type) {
+       case FREQ_QOS_MIN:
+               ret = IS_ERR_OR_NULL(qos) ?
+                       FREQ_QOS_MIN_DEFAULT_VALUE :
+                       pm_qos_read_value(&qos->min_freq);
+               break;
+       case FREQ_QOS_MAX:
+               ret = IS_ERR_OR_NULL(qos) ?
+                       FREQ_QOS_MAX_DEFAULT_VALUE :
+                       pm_qos_read_value(&qos->max_freq);
+               break;
+       default:
+               WARN_ON(1);
+               ret = 0;
+       }
+
+       return ret;
+}
+
+/**
+ * freq_qos_apply - Add/modify/remove frequency QoS request.
+ * @req: Constraint request to apply.
+ * @action: Action to perform (add/update/remove).
+ * @value: Value to assign to the QoS request.
+ */
+static int freq_qos_apply(struct freq_qos_request *req,
+                         enum pm_qos_req_action action, s32 value)
+{
+       int ret;
+
+       switch (req->type) {
+       case FREQ_QOS_MIN:
+               ret = pm_qos_update_target(&req->qos->min_freq, &req->pnode,
+                                          action, value);
+               break;
+       case FREQ_QOS_MAX:
+               ret = pm_qos_update_target(&req->qos->max_freq, &req->pnode,
+                                          action, value);
+               break;
+       default:
+               ret = -EINVAL;
+       }
+
+       return ret;
+}
+
+/**
+ * freq_qos_add_request - Insert new frequency QoS request into a given list.
+ * @qos: Constraints to update.
+ * @req: Preallocated request object.
+ * @type: Request type.
+ * @value: Request value.
+ *
+ * Insert a new entry into the @qos list of requests, recompute the effective
+ * QoS constraint value for that list and initialize the @req object.  The
+ * caller needs to save that object for later use in updates and removal.
+ *
+ * Return 1 if the effective constraint value has changed, 0 if the effective
+ * constraint value has not changed, or a negative error code on failures.
+ */
+int freq_qos_add_request(struct freq_constraints *qos,
+                        struct freq_qos_request *req,
+                        enum freq_qos_req_type type, s32 value)
+{
+       int ret;
+
+       if (IS_ERR_OR_NULL(qos) || !req)
+               return -EINVAL;
+
+       if (WARN(freq_qos_request_active(req),
+                "%s() called for active request\n", __func__))
+               return -EINVAL;
+
+       req->qos = qos;
+       req->type = type;
+       ret = freq_qos_apply(req, PM_QOS_ADD_REQ, value);
+       if (ret < 0) {
+               req->qos = NULL;
+               req->type = 0;
+       }
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(freq_qos_add_request);
+
+/**
+ * freq_qos_update_request - Modify existing frequency QoS request.
+ * @req: Request to modify.
+ * @new_value: New request value.
+ *
+ * Update an existing frequency QoS request along with the effective constraint
+ * value for the list of requests it belongs to.
+ *
+ * Return 1 if the effective constraint value has changed, 0 if the effective
+ * constraint value has not changed, or a negative error code on failures.
+ */
+int freq_qos_update_request(struct freq_qos_request *req, s32 new_value)
+{
+       if (!req)
+               return -EINVAL;
+
+       if (WARN(!freq_qos_request_active(req),
+                "%s() called for unknown object\n", __func__))
+               return -EINVAL;
+
+       if (req->pnode.prio == new_value)
+               return 0;
+
+       return freq_qos_apply(req, PM_QOS_UPDATE_REQ, new_value);
+}
+EXPORT_SYMBOL_GPL(freq_qos_update_request);
+
+/**
+ * freq_qos_remove_request - Remove frequency QoS request from its list.
+ * @req: Request to remove.
+ *
+ * Remove the given frequency QoS request from the list of constraints it
+ * belongs to and recompute the effective constraint value for that list.
+ *
+ * Return 1 if the effective constraint value has changed, 0 if the effective
+ * constraint value has not changed, or a negative error code on failures.
+ */
+int freq_qos_remove_request(struct freq_qos_request *req)
+{
+       if (!req)
+               return -EINVAL;
+
+       if (WARN(!freq_qos_request_active(req),
+                "%s() called for unknown object\n", __func__))
+               return -EINVAL;
+
+       return freq_qos_apply(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
+}
+EXPORT_SYMBOL_GPL(freq_qos_remove_request);
+
+/**
+ * freq_qos_add_notifier - Add frequency QoS change notifier.
+ * @qos: List of requests to add the notifier to.
+ * @type: Request type.
+ * @notifier: Notifier block to add.
+ */
+int freq_qos_add_notifier(struct freq_constraints *qos,
+                         enum freq_qos_req_type type,
+                         struct notifier_block *notifier)
+{
+       int ret;
+
+       if (IS_ERR_OR_NULL(qos) || !notifier)
+               return -EINVAL;
+
+       switch (type) {
+       case FREQ_QOS_MIN:
+               ret = blocking_notifier_chain_register(qos->min_freq.notifiers,
+                                                      notifier);
+               break;
+       case FREQ_QOS_MAX:
+               ret = blocking_notifier_chain_register(qos->max_freq.notifiers,
+                                                      notifier);
+               break;
+       default:
+               WARN_ON(1);
+               ret = -EINVAL;
+       }
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(freq_qos_add_notifier);
+
+/**
+ * freq_qos_remove_notifier - Remove frequency QoS change notifier.
+ * @qos: List of requests to remove the notifier from.
+ * @type: Request type.
+ * @notifier: Notifier block to remove.
+ */
+int freq_qos_remove_notifier(struct freq_constraints *qos,
+                            enum freq_qos_req_type type,
+                            struct notifier_block *notifier)
+{
+       int ret;
+
+       if (IS_ERR_OR_NULL(qos) || !notifier)
+               return -EINVAL;
+
+       switch (type) {
+       case FREQ_QOS_MIN:
+               ret = blocking_notifier_chain_unregister(qos->min_freq.notifiers,
+                                                        notifier);
+               break;
+       case FREQ_QOS_MAX:
+               ret = blocking_notifier_chain_unregister(qos->max_freq.notifiers,
+                                                        notifier);
+               break;
+       default:
+               WARN_ON(1);
+               ret = -EINVAL;
+       }
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(freq_qos_remove_notifier);
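
The intended consumer of this new frequency QoS interface is cpufreq: a user embeds a struct freq_constraints, then adds, updates and eventually removes requests against it, and each add/update/remove call returns 1 when the aggregate constraint changed, 0 when it did not, or a negative error code. A hedged in-kernel sketch (the owning structure and the kHz values are purely illustrative):

#include <linux/pm_qos.h>

/* Illustrative consumer: cap the maximum frequency of some domain. */
struct my_freq_domain {
        struct freq_constraints constraints;
        struct freq_qos_request max_req;
};

static int my_domain_init(struct my_freq_domain *d)
{
        int ret;

        freq_constraints_init(&d->constraints);

        /* Start with a 1.2 GHz cap; the unit is up to the user of the
         * constraint set (cpufreq uses kHz), illustrative here. */
        ret = freq_qos_add_request(&d->constraints, &d->max_req,
                                   FREQ_QOS_MAX, 1200000);
        return ret < 0 ? ret : 0;
}

static void my_domain_thermal_throttle(struct my_freq_domain *d)
{
        /* Tighten the cap; readers observe the aggregate via
         * freq_qos_read_value(&d->constraints, FREQ_QOS_MAX). */
        freq_qos_update_request(&d->max_req, 800000);
}

static void my_domain_exit(struct my_freq_domain *d)
{
        freq_qos_remove_request(&d->max_req);
}
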
index 7880f4f64d0eea19dab03a0408625a505b4454ed..dd05a378631a866c2f499c56986e5de7a703e766 100644 (file)
@@ -5106,9 +5106,6 @@ static int sched_copy_attr(struct sched_attr __user *uattr, struct sched_attr *a
        u32 size;
        int ret;
 
-       if (!access_ok(uattr, SCHED_ATTR_SIZE_VER0))
-               return -EFAULT;
-
        /* Zero the full structure, so that a short copy will be nice: */
        memset(attr, 0, sizeof(*attr));
 
@@ -5116,45 +5113,19 @@ static int sched_copy_attr(struct sched_attr __user *uattr, struct sched_attr *a
        if (ret)
                return ret;
 
-       /* Bail out on silly large: */
-       if (size > PAGE_SIZE)
-               goto err_size;
-
        /* ABI compatibility quirk: */
        if (!size)
                size = SCHED_ATTR_SIZE_VER0;
-
-       if (size < SCHED_ATTR_SIZE_VER0)
+       if (size < SCHED_ATTR_SIZE_VER0 || size > PAGE_SIZE)
                goto err_size;
 
-       /*
-        * If we're handed a bigger struct than we know of,
-        * ensure all the unknown bits are 0 - i.e. new
-        * user-space does not rely on any kernel feature
-        * extensions we dont know about yet.
-        */
-       if (size > sizeof(*attr)) {
-               unsigned char __user *addr;
-               unsigned char __user *end;
-               unsigned char val;
-
-               addr = (void __user *)uattr + sizeof(*attr);
-               end  = (void __user *)uattr + size;
-
-               for (; addr < end; addr++) {
-                       ret = get_user(val, addr);
-                       if (ret)
-                               return ret;
-                       if (val)
-                               goto err_size;
-               }
-               size = sizeof(*attr);
+       ret = copy_struct_from_user(attr, sizeof(*attr), uattr, size);
+       if (ret) {
+               if (ret == -E2BIG)
+                       goto err_size;
+               return ret;
        }
 
-       ret = copy_from_user(attr, uattr, size);
-       if (ret)
-               return -EFAULT;
-
        if ((attr->sched_flags & SCHED_FLAG_UTIL_CLAMP) &&
            size < SCHED_ATTR_SIZE_VER1)
                return -EINVAL;
@@ -5354,7 +5325,7 @@ sched_attr_copy_to_user(struct sched_attr __user *uattr,
  * sys_sched_getattr - similar to sched_getparam, but with sched_attr
  * @pid: the pid in question.
  * @uattr: structure containing the extended parameters.
- * @usize: sizeof(attr) that user-space knows about, for forwards and backwards compatibility.
+ * @usize: sizeof(attr) for fwd/bwd comp.
  * @flags: for future extension.
  */
 SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
index 2305ce89a26cfed3728f82b92d9ee73252f5ba1d..46ed4e1383e21c4f99fd832cc6bc55f9606a1c1e 100644 (file)
@@ -740,7 +740,7 @@ void vtime_account_system(struct task_struct *tsk)
 
        write_seqcount_begin(&vtime->seqcount);
        /* We might have scheduled out from guest path */
-       if (current->flags & PF_VCPU)
+       if (tsk->flags & PF_VCPU)
                vtime_account_guest(tsk, vtime);
        else
                __vtime_account_system(tsk, vtime);
@@ -783,7 +783,7 @@ void vtime_guest_enter(struct task_struct *tsk)
         */
        write_seqcount_begin(&vtime->seqcount);
        __vtime_account_system(tsk, vtime);
-       current->flags |= PF_VCPU;
+       tsk->flags |= PF_VCPU;
        write_seqcount_end(&vtime->seqcount);
 }
 EXPORT_SYMBOL_GPL(vtime_guest_enter);
@@ -794,7 +794,7 @@ void vtime_guest_exit(struct task_struct *tsk)
 
        write_seqcount_begin(&vtime->seqcount);
        vtime_account_guest(tsk, vtime);
-       current->flags &= ~PF_VCPU;
+       tsk->flags &= ~PF_VCPU;
        write_seqcount_end(&vtime->seqcount);
 }
 EXPORT_SYMBOL_GPL(vtime_guest_exit);
index 83ab35e2374f632e0211029d7e1baa70150c388a..682a754ea3e1a867e7b1c0cb2c8dee0dfc5c1461 100644 (file)
@@ -4926,20 +4926,28 @@ static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
                if (++count > 3) {
                        u64 new, old = ktime_to_ns(cfs_b->period);
 
-                       new = (old * 147) / 128; /* ~115% */
-                       new = min(new, max_cfs_quota_period);
-
-                       cfs_b->period = ns_to_ktime(new);
-
-                       /* since max is 1s, this is limited to 1e9^2, which fits in u64 */
-                       cfs_b->quota *= new;
-                       cfs_b->quota = div64_u64(cfs_b->quota, old);
-
-                       pr_warn_ratelimited(
-       "cfs_period_timer[cpu%d]: period too short, scaling up (new cfs_period_us %lld, cfs_quota_us = %lld)\n",
-                               smp_processor_id(),
-                               div_u64(new, NSEC_PER_USEC),
-                               div_u64(cfs_b->quota, NSEC_PER_USEC));
+                       /*
+                        * Grow period by a factor of 2 to avoid losing precision.
+                        * Precision loss in the quota/period ratio can cause __cfs_schedulable
+                        * to fail.
+                        */
+                       new = old * 2;
+                       if (new < max_cfs_quota_period) {
+                               cfs_b->period = ns_to_ktime(new);
+                               cfs_b->quota *= 2;
+
+                               pr_warn_ratelimited(
+       "cfs_period_timer[cpu%d]: period too short, scaling up (new cfs_period_us = %lld, cfs_quota_us = %lld)\n",
+                                       smp_processor_id(),
+                                       div_u64(new, NSEC_PER_USEC),
+                                       div_u64(cfs_b->quota, NSEC_PER_USEC));
+                       } else {
+                               pr_warn_ratelimited(
+       "cfs_period_timer[cpu%d]: period too short, but cannot scale up without losing precision (cfs_period_us = %lld, cfs_quota_us = %lld)\n",
+                                       smp_processor_id(),
+                                       div_u64(old, NSEC_PER_USEC),
+                                       div_u64(cfs_b->quota, NSEC_PER_USEC));
+                       }
 
                        /* reset count so we don't come right back in here */
                        count = 0;
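
A concrete illustration of the precision problem the doubling avoids (numbers are illustrative, any time unit): with the old scaling, a period of 128000 and a quota of 1000 (ratio exactly 1000/128000 = 0.0078125) became period 128000 * 147 / 128 = 147000 and quota 1000 * 147000 / 128000 = 1148 after truncation, so the new ratio 1148/147000 ≈ 0.0078095 no longer matches the configured one; that kind of drift is what can make __cfs_schedulable() reject an otherwise valid hierarchy. Doubling both values keeps the ratio exact: 2000/256000 = 0.0078125.
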
index a39bed2c784f42be244082e5d80320f72e9d8daf..168479a7d61b8cf411ec5cd5554e00823c99f98f 100644 (file)
@@ -174,7 +174,6 @@ static int membarrier_private_expedited(int flags)
                 */
                if (cpu == raw_smp_processor_id())
                        continue;
-               rcu_read_lock();
                p = rcu_dereference(cpu_rq(cpu)->curr);
                if (p && p->mm == mm)
                        __cpumask_set_cpu(cpu, tmpmask);
index b5667a273bf67e0718371a783b259ffb4c91cbc7..49b835f1305f8c1cfe659bb466a00c4b71767185 100644 (file)
@@ -1948,7 +1948,7 @@ next_level:
 static int
 build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *attr)
 {
-       enum s_alloc alloc_state;
+       enum s_alloc alloc_state = sa_none;
        struct sched_domain *sd;
        struct s_data d;
        struct rq *rq = NULL;
@@ -1956,6 +1956,9 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *att
        struct sched_domain_topology_level *tl_asym;
        bool has_asym = false;
 
+       if (WARN_ON(cpumask_empty(cpu_map)))
+               goto error;
+
        alloc_state = __visit_domain_allocation_hell(&d, cpu_map);
        if (alloc_state != sa_rootdomain)
                goto error;
@@ -2026,7 +2029,7 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *att
        rcu_read_unlock();
 
        if (has_asym)
-               static_branch_enable_cpuslocked(&sched_asym_cpucapacity);
+               static_branch_inc_cpuslocked(&sched_asym_cpucapacity);
 
        if (rq && sched_debug_enabled) {
                pr_info("root domain span: %*pbl (max cpu_capacity = %lu)\n",
@@ -2121,8 +2124,12 @@ int sched_init_domains(const struct cpumask *cpu_map)
  */
 static void detach_destroy_domains(const struct cpumask *cpu_map)
 {
+       unsigned int cpu = cpumask_any(cpu_map);
        int i;
 
+       if (rcu_access_pointer(per_cpu(sd_asym_cpucapacity, cpu)))
+               static_branch_dec_cpuslocked(&sched_asym_cpucapacity);
+
        rcu_read_lock();
        for_each_cpu(i, cpu_map)
                cpu_attach_domain(NULL, &def_root_domain, i);
index c7031a22aa7bcb9dfd0f73da764108a321e99442..998d50ee2d9bbb0856a1a374279dfb170db751a2 100644 (file)
@@ -7,6 +7,7 @@
  * Copyright (C) 2010          SUSE Linux Products GmbH
  * Copyright (C) 2010          Tejun Heo <tj@kernel.org>
  */
+#include <linux/compiler.h>
 #include <linux/completion.h>
 #include <linux/cpu.h>
 #include <linux/init.h>
@@ -167,7 +168,7 @@ static void set_state(struct multi_stop_data *msdata,
        /* Reset ack counter. */
        atomic_set(&msdata->thread_ack, msdata->num_threads);
        smp_wmb();
-       msdata->state = newstate;
+       WRITE_ONCE(msdata->state, newstate);
 }
 
 /* Last one to ack a state moves to the next state. */
@@ -186,7 +187,7 @@ void __weak stop_machine_yield(const struct cpumask *cpumask)
 static int multi_cpu_stop(void *data)
 {
        struct multi_stop_data *msdata = data;
-       enum multi_stop_state curstate = MULTI_STOP_NONE;
+       enum multi_stop_state newstate, curstate = MULTI_STOP_NONE;
        int cpu = smp_processor_id(), err = 0;
        const struct cpumask *cpumask;
        unsigned long flags;
@@ -210,8 +211,9 @@ static int multi_cpu_stop(void *data)
        do {
                /* Chill out and ensure we re-read multi_stop_state. */
                stop_machine_yield(cpumask);
-               if (msdata->state != curstate) {
-                       curstate = msdata->state;
+               newstate = READ_ONCE(msdata->state);
+               if (newstate != curstate) {
+                       curstate = newstate;
                        switch (curstate) {
                        case MULTI_STOP_DISABLE_IRQ:
                                local_irq_disable();
index 00fcea236ebacffa71236af5ddfcaf2df8d0855d..b6f2f35d0bcf541892e3be9b0c22957552a70d9c 100644 (file)
@@ -163,7 +163,7 @@ static unsigned long hung_task_timeout_max = (LONG_MAX/HZ);
 #ifdef CONFIG_SPARC
 #endif
 
-#ifdef __hppa__
+#ifdef CONFIG_PARISC
 extern int pwrsw_enabled;
 #endif
 
@@ -620,7 +620,7 @@ static struct ctl_table kern_table[] = {
                .proc_handler   = proc_dointvec,
        },
 #endif
-#ifdef __hppa__
+#ifdef CONFIG_PARISC
        {
                .procname       = "soft-power",
                .data           = &pwrsw_enabled,
index 0d4dc241c0fb498036c91a571e65cb00f5d19ba6..65605530ee349c9682690c4fccb43aa9284d4287 100644 (file)
@@ -164,7 +164,7 @@ struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer,
        struct hrtimer_clock_base *base;
 
        for (;;) {
-               base = timer->base;
+               base = READ_ONCE(timer->base);
                if (likely(base != &migration_base)) {
                        raw_spin_lock_irqsave(&base->cpu_base->lock, *flags);
                        if (likely(base == timer->base))
@@ -244,7 +244,7 @@ again:
                        return base;
 
                /* See the comment in lock_hrtimer_base() */
-               timer->base = &migration_base;
+               WRITE_ONCE(timer->base, &migration_base);
                raw_spin_unlock(&base->cpu_base->lock);
                raw_spin_lock(&new_base->cpu_base->lock);
 
@@ -253,10 +253,10 @@ again:
                        raw_spin_unlock(&new_base->cpu_base->lock);
                        raw_spin_lock(&base->cpu_base->lock);
                        new_cpu_base = this_cpu_base;
-                       timer->base = base;
+                       WRITE_ONCE(timer->base, base);
                        goto again;
                }
-               timer->base = new_base;
+               WRITE_ONCE(timer->base, new_base);
        } else {
                if (new_cpu_base != this_cpu_base &&
                    hrtimer_check_target(timer, new_base)) {
index 92a431981b1c46862da651201f3721140d76af09..42d512fcfda2e6de092831f97bfc75c85b0b6137 100644 (file)
@@ -266,7 +266,7 @@ static void update_gt_cputime(struct task_cputime_atomic *cputime_atomic,
 /**
  * thread_group_sample_cputime - Sample cputime for a given task
  * @tsk:       Task for which cputime needs to be started
- * @iimes:     Storage for time samples
+ * @samples:   Storage for time samples
  *
  * Called from sys_getitimer() to calculate the expiry time of an active
  * timer. That means group cputime accounting is already active. Called
@@ -1038,12 +1038,12 @@ unlock:
  * member of @pct->bases[CLK].nextevt. False otherwise
  */
 static inline bool
-task_cputimers_expired(const u64 *sample, struct posix_cputimers *pct)
+task_cputimers_expired(const u64 *samples, struct posix_cputimers *pct)
 {
        int i;
 
        for (i = 0; i < CPUCLOCK_MAX; i++) {
-               if (sample[i] >= pct->bases[i].nextevt)
+               if (samples[i] >= pct->bases[i].nextevt)
                        return true;
        }
        return false;
index 142b07619918b13db03d1c3a91437c1f96ff28bf..dbd69052eaa6666854b47562c278336a87a14d89 100644 (file)
@@ -17,6 +17,8 @@
 #include <linux/seqlock.h>
 #include <linux/bitops.h>
 
+#include "timekeeping.h"
+
 /**
  * struct clock_read_data - data required to read from sched_clock()
  *
index c1f5bb590b5e43b5a8598b789bc09157add53448..b5a65e212df2f9a1e4b55df717a7f97652b1ed61 100644 (file)
@@ -42,39 +42,39 @@ static int bc_shutdown(struct clock_event_device *evt)
  */
 static int bc_set_next(ktime_t expires, struct clock_event_device *bc)
 {
-       int bc_moved;
        /*
-        * We try to cancel the timer first. If the callback is on
-        * flight on some other cpu then we let it handle it. If we
-        * were able to cancel the timer nothing can rearm it as we
-        * own broadcast_lock.
+        * This is called either from enter/exit idle code or from the
+        * broadcast handler. In all cases tick_broadcast_lock is held.
         *
-        * However we can also be called from the event handler of
-        * ce_broadcast_hrtimer itself when it expires. We cannot
-        * restart the timer because we are in the callback, but we
-        * can set the expiry time and let the callback return
-        * HRTIMER_RESTART.
+        * hrtimer_cancel() can be called here neither from the
+        * broadcast handler nor from the enter/exit idle code. The idle
+        * code can run into the problem described in bc_shutdown() and the
+        * broadcast handler cannot wait for itself to complete for obvious
+        * reasons.
         *
-        * Since we are in the idle loop at this point and because
-        * hrtimer_{start/cancel} functions call into tracing,
-        * calls to these functions must be bound within RCU_NONIDLE.
+        * Each caller tries to arm the hrtimer on its own CPU, but if the
+        * hrtimer callback function is currently running, then
+        * hrtimer_start() cannot move it and the timer stays on the CPU on
+        * which it is assigned at the moment.
+        *
+        * As this can be called from idle code, the hrtimer_start()
+        * invocation has to be wrapped with RCU_NONIDLE() as
+        * hrtimer_start() can call into tracing.
         */
-       RCU_NONIDLE(
-               {
-                       bc_moved = hrtimer_try_to_cancel(&bctimer) >= 0;
-                       if (bc_moved) {
-                               hrtimer_start(&bctimer, expires,
-                                             HRTIMER_MODE_ABS_PINNED_HARD);
-                       }
-               }
-       );
-
-       if (bc_moved) {
-               /* Bind the "device" to the cpu */
-               bc->bound_on = smp_processor_id();
-       } else if (bc->bound_on == smp_processor_id()) {
-               hrtimer_set_expires(&bctimer, expires);
-       }
+       RCU_NONIDLE( {
+               hrtimer_start(&bctimer, expires, HRTIMER_MODE_ABS_PINNED_HARD);
+               /*
+                * The core tick broadcast mode expects bc->bound_on to be set
+                * correctly to prevent a CPU which has the broadcast hrtimer
+                * armed from going deep idle.
+                *
+                * As tick_broadcast_lock is held, nothing can change the cpu
+                * base which was just established in hrtimer_start() above. So
+                * the below access is safe even without holding the hrtimer
+                * base lock.
+                */
+               bc->bound_on = bctimer.base->cpu_base->cpu;
+       } );
        return 0;
 }
 
@@ -100,10 +100,6 @@ static enum hrtimer_restart bc_handler(struct hrtimer *t)
 {
        ce_broadcast_hrtimer.event_handler(&ce_broadcast_hrtimer);
 
-       if (clockevent_state_oneshot(&ce_broadcast_hrtimer))
-               if (ce_broadcast_hrtimer.next_event != KTIME_MAX)
-                       return HRTIMER_RESTART;
-
        return HRTIMER_NORESTART;
 }
 
index 62a50bf399d6365131158cb65c2fc4ea62526202..f296d89be757974861953602ba933859ed8d7cb7 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/clocksource.h>
 #include <linux/sched/task.h>
 #include <linux/kallsyms.h>
+#include <linux/security.h>
 #include <linux/seq_file.h>
 #include <linux/tracefs.h>
 #include <linux/hardirq.h>
@@ -3486,6 +3487,11 @@ static int
 ftrace_avail_open(struct inode *inode, struct file *file)
 {
        struct ftrace_iterator *iter;
+       int ret;
+
+       ret = security_locked_down(LOCKDOWN_TRACEFS);
+       if (ret)
+               return ret;
 
        if (unlikely(ftrace_disabled))
                return -ENODEV;
@@ -3505,6 +3511,15 @@ ftrace_enabled_open(struct inode *inode, struct file *file)
 {
        struct ftrace_iterator *iter;
 
+       /*
+        * This shows us what functions are currently being
+        * traced and by what. Not sure if we want lockdown
+        * to hide such critical information for an admin.
+        * Although, perhaps it can show information we don't
+        * want people to see, but if something is tracing
+        * something, we probably want to know about it.
+        */
+
        iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
        if (!iter)
                return -ENOMEM;
@@ -3540,21 +3555,22 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag,
        struct ftrace_hash *hash;
        struct list_head *mod_head;
        struct trace_array *tr = ops->private;
-       int ret = 0;
+       int ret = -ENOMEM;
 
        ftrace_ops_init(ops);
 
        if (unlikely(ftrace_disabled))
                return -ENODEV;
 
+       if (tracing_check_open_get_tr(tr))
+               return -ENODEV;
+
        iter = kzalloc(sizeof(*iter), GFP_KERNEL);
        if (!iter)
-               return -ENOMEM;
+               goto out;
 
-       if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) {
-               kfree(iter);
-               return -ENOMEM;
-       }
+       if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX))
+               goto out;
 
        iter->ops = ops;
        iter->flags = flag;
@@ -3584,13 +3600,13 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag,
 
                if (!iter->hash) {
                        trace_parser_put(&iter->parser);
-                       kfree(iter);
-                       ret = -ENOMEM;
                        goto out_unlock;
                }
        } else
                iter->hash = hash;
 
+       ret = 0;
+
        if (file->f_mode & FMODE_READ) {
                iter->pg = ftrace_pages_start;
 
@@ -3602,7 +3618,6 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag,
                        /* Failed */
                        free_ftrace_hash(iter->hash);
                        trace_parser_put(&iter->parser);
-                       kfree(iter);
                }
        } else
                file->private_data = iter;
@@ -3610,6 +3625,13 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag,
  out_unlock:
        mutex_unlock(&ops->func_hash->regex_lock);
 
+ out:
+       if (ret) {
+               kfree(iter);
+               if (tr)
+                       trace_array_put(tr);
+       }
+
        return ret;
 }
 
@@ -3618,6 +3640,7 @@ ftrace_filter_open(struct inode *inode, struct file *file)
 {
        struct ftrace_ops *ops = inode->i_private;
 
+       /* Checks for tracefs lockdown */
        return ftrace_regex_open(ops,
                        FTRACE_ITER_FILTER | FTRACE_ITER_DO_PROBES,
                        inode, file);
@@ -3628,6 +3651,7 @@ ftrace_notrace_open(struct inode *inode, struct file *file)
 {
        struct ftrace_ops *ops = inode->i_private;
 
+       /* Checks for tracefs lockdown */
        return ftrace_regex_open(ops, FTRACE_ITER_NOTRACE,
                                 inode, file);
 }
@@ -5037,6 +5061,8 @@ int ftrace_regex_release(struct inode *inode, struct file *file)
 
        mutex_unlock(&iter->ops->func_hash->regex_lock);
        free_ftrace_hash(iter->hash);
+       if (iter->tr)
+               trace_array_put(iter->tr);
        kfree(iter);
 
        return 0;
@@ -5194,9 +5220,13 @@ static int
 __ftrace_graph_open(struct inode *inode, struct file *file,
                    struct ftrace_graph_data *fgd)
 {
-       int ret = 0;
+       int ret;
        struct ftrace_hash *new_hash = NULL;
 
+       ret = security_locked_down(LOCKDOWN_TRACEFS);
+       if (ret)
+               return ret;
+
        if (file->f_mode & FMODE_WRITE) {
                const int size_bits = FTRACE_HASH_DEFAULT_BITS;
 
@@ -6537,8 +6567,9 @@ ftrace_pid_open(struct inode *inode, struct file *file)
        struct seq_file *m;
        int ret = 0;
 
-       if (trace_array_get(tr) < 0)
-               return -ENODEV;
+       ret = tracing_check_open_get_tr(tr);
+       if (ret)
+               return ret;
 
        if ((file->f_mode & FMODE_WRITE) &&
            (file->f_flags & O_TRUNC))
index 252f79c435f810f46dbe7bbde60602818b676706..6a0ee91783656afc85e9fe6ec37a802f2b7a4dd1 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/stacktrace.h>
 #include <linux/writeback.h>
 #include <linux/kallsyms.h>
+#include <linux/security.h>
 #include <linux/seq_file.h>
 #include <linux/notifier.h>
 #include <linux/irqflags.h>
@@ -304,6 +305,23 @@ void trace_array_put(struct trace_array *this_tr)
        mutex_unlock(&trace_types_lock);
 }
 
+int tracing_check_open_get_tr(struct trace_array *tr)
+{
+       int ret;
+
+       ret = security_locked_down(LOCKDOWN_TRACEFS);
+       if (ret)
+               return ret;
+
+       if (tracing_disabled)
+               return -ENODEV;
+
+       if (tr && trace_array_get(tr) < 0)
+               return -ENODEV;
+
+       return 0;
+}
+
 int call_filter_check_discard(struct trace_event_call *call, void *rec,
                              struct ring_buffer *buffer,
                              struct ring_buffer_event *event)
@@ -4140,8 +4158,11 @@ release:
 
 int tracing_open_generic(struct inode *inode, struct file *filp)
 {
-       if (tracing_disabled)
-               return -ENODEV;
+       int ret;
+
+       ret = tracing_check_open_get_tr(NULL);
+       if (ret)
+               return ret;
 
        filp->private_data = inode->i_private;
        return 0;
@@ -4156,15 +4177,14 @@ bool tracing_is_disabled(void)
  * Open and update trace_array ref count.
  * Must have the current trace_array passed to it.
  */
-static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
+int tracing_open_generic_tr(struct inode *inode, struct file *filp)
 {
        struct trace_array *tr = inode->i_private;
+       int ret;
 
-       if (tracing_disabled)
-               return -ENODEV;
-
-       if (trace_array_get(tr) < 0)
-               return -ENODEV;
+       ret = tracing_check_open_get_tr(tr);
+       if (ret)
+               return ret;
 
        filp->private_data = inode->i_private;
 
@@ -4233,10 +4253,11 @@ static int tracing_open(struct inode *inode, struct file *file)
 {
        struct trace_array *tr = inode->i_private;
        struct trace_iterator *iter;
-       int ret = 0;
+       int ret;
 
-       if (trace_array_get(tr) < 0)
-               return -ENODEV;
+       ret = tracing_check_open_get_tr(tr);
+       if (ret)
+               return ret;
 
        /* If this file was open for write, then erase contents */
        if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
@@ -4352,12 +4373,15 @@ static int show_traces_open(struct inode *inode, struct file *file)
        struct seq_file *m;
        int ret;
 
-       if (tracing_disabled)
-               return -ENODEV;
+       ret = tracing_check_open_get_tr(tr);
+       if (ret)
+               return ret;
 
        ret = seq_open(file, &show_traces_seq_ops);
-       if (ret)
+       if (ret) {
+               trace_array_put(tr);
                return ret;
+       }
 
        m = file->private_data;
        m->private = tr;
@@ -4365,6 +4389,14 @@ static int show_traces_open(struct inode *inode, struct file *file)
        return 0;
 }
 
+static int show_traces_release(struct inode *inode, struct file *file)
+{
+       struct trace_array *tr = inode->i_private;
+
+       trace_array_put(tr);
+       return seq_release(inode, file);
+}
+
 static ssize_t
 tracing_write_stub(struct file *filp, const char __user *ubuf,
                   size_t count, loff_t *ppos)
@@ -4395,8 +4427,8 @@ static const struct file_operations tracing_fops = {
 static const struct file_operations show_traces_fops = {
        .open           = show_traces_open,
        .read           = seq_read,
-       .release        = seq_release,
        .llseek         = seq_lseek,
+       .release        = show_traces_release,
 };
 
 static ssize_t
@@ -4697,11 +4729,9 @@ static int tracing_trace_options_open(struct inode *inode, struct file *file)
        struct trace_array *tr = inode->i_private;
        int ret;
 
-       if (tracing_disabled)
-               return -ENODEV;
-
-       if (trace_array_get(tr) < 0)
-               return -ENODEV;
+       ret = tracing_check_open_get_tr(tr);
+       if (ret)
+               return ret;
 
        ret = single_open(file, tracing_trace_options_show, inode->i_private);
        if (ret < 0)
@@ -5038,8 +5068,11 @@ static const struct seq_operations tracing_saved_tgids_seq_ops = {
 
 static int tracing_saved_tgids_open(struct inode *inode, struct file *filp)
 {
-       if (tracing_disabled)
-               return -ENODEV;
+       int ret;
+
+       ret = tracing_check_open_get_tr(NULL);
+       if (ret)
+               return ret;
 
        return seq_open(filp, &tracing_saved_tgids_seq_ops);
 }
@@ -5115,8 +5148,11 @@ static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
 
 static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
 {
-       if (tracing_disabled)
-               return -ENODEV;
+       int ret;
+
+       ret = tracing_check_open_get_tr(NULL);
+       if (ret)
+               return ret;
 
        return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
 }
@@ -5280,8 +5316,11 @@ static const struct seq_operations tracing_eval_map_seq_ops = {
 
 static int tracing_eval_map_open(struct inode *inode, struct file *filp)
 {
-       if (tracing_disabled)
-               return -ENODEV;
+       int ret;
+
+       ret = tracing_check_open_get_tr(NULL);
+       if (ret)
+               return ret;
 
        return seq_open(filp, &tracing_eval_map_seq_ops);
 }
@@ -5804,13 +5843,11 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
 {
        struct trace_array *tr = inode->i_private;
        struct trace_iterator *iter;
-       int ret = 0;
-
-       if (tracing_disabled)
-               return -ENODEV;
+       int ret;
 
-       if (trace_array_get(tr) < 0)
-               return -ENODEV;
+       ret = tracing_check_open_get_tr(tr);
+       if (ret)
+               return ret;
 
        mutex_lock(&trace_types_lock);
 
@@ -5999,6 +6036,7 @@ waitagain:
               sizeof(struct trace_iterator) -
               offsetof(struct trace_iterator, seq));
        cpumask_clear(iter->started);
+       trace_seq_init(&iter->seq);
        iter->pos = -1;
 
        trace_event_read_lock();
@@ -6547,11 +6585,9 @@ static int tracing_clock_open(struct inode *inode, struct file *file)
        struct trace_array *tr = inode->i_private;
        int ret;
 
-       if (tracing_disabled)
-               return -ENODEV;
-
-       if (trace_array_get(tr))
-               return -ENODEV;
+       ret = tracing_check_open_get_tr(tr);
+       if (ret)
+               return ret;
 
        ret = single_open(file, tracing_clock_show, inode->i_private);
        if (ret < 0)
@@ -6581,11 +6617,9 @@ static int tracing_time_stamp_mode_open(struct inode *inode, struct file *file)
        struct trace_array *tr = inode->i_private;
        int ret;
 
-       if (tracing_disabled)
-               return -ENODEV;
-
-       if (trace_array_get(tr))
-               return -ENODEV;
+       ret = tracing_check_open_get_tr(tr);
+       if (ret)
+               return ret;
 
        ret = single_open(file, tracing_time_stamp_mode_show, inode->i_private);
        if (ret < 0)
@@ -6638,10 +6672,11 @@ static int tracing_snapshot_open(struct inode *inode, struct file *file)
        struct trace_array *tr = inode->i_private;
        struct trace_iterator *iter;
        struct seq_file *m;
-       int ret = 0;
+       int ret;
 
-       if (trace_array_get(tr) < 0)
-               return -ENODEV;
+       ret = tracing_check_open_get_tr(tr);
+       if (ret)
+               return ret;
 
        if (file->f_mode & FMODE_READ) {
                iter = __tracing_open(inode, file, true);
@@ -6786,6 +6821,7 @@ static int snapshot_raw_open(struct inode *inode, struct file *filp)
        struct ftrace_buffer_info *info;
        int ret;
 
+       /* The following checks for tracefs lockdown */
        ret = tracing_buffers_open(inode, filp);
        if (ret < 0)
                return ret;
@@ -7105,8 +7141,9 @@ static int tracing_err_log_open(struct inode *inode, struct file *file)
        struct trace_array *tr = inode->i_private;
        int ret = 0;
 
-       if (trace_array_get(tr) < 0)
-               return -ENODEV;
+       ret = tracing_check_open_get_tr(tr);
+       if (ret)
+               return ret;
 
        /* If this file was opened for write, then erase contents */
        if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC))
@@ -7157,11 +7194,9 @@ static int tracing_buffers_open(struct inode *inode, struct file *filp)
        struct ftrace_buffer_info *info;
        int ret;
 
-       if (tracing_disabled)
-               return -ENODEV;
-
-       if (trace_array_get(tr) < 0)
-               return -ENODEV;
+       ret = tracing_check_open_get_tr(tr);
+       if (ret)
+               return ret;
 
        info = kzalloc(sizeof(*info), GFP_KERNEL);
        if (!info) {
index f801d154ff6a3bb71950c215d8598c9a9a5acbe0..d685c61085c0ddd028ce90db179dc22c8c4630c4 100644 (file)
@@ -338,6 +338,7 @@ extern struct mutex trace_types_lock;
 
 extern int trace_array_get(struct trace_array *tr);
 extern void trace_array_put(struct trace_array *tr);
+extern int tracing_check_open_get_tr(struct trace_array *tr);
 
 extern int tracing_set_time_stamp_abs(struct trace_array *tr, bool abs);
 extern int tracing_set_clock(struct trace_array *tr, const char *clockstr);
@@ -681,6 +682,7 @@ void tracing_reset_online_cpus(struct trace_buffer *buf);
 void tracing_reset_current(int cpu);
 void tracing_reset_all_online_cpus(void);
 int tracing_open_generic(struct inode *inode, struct file *filp);
+int tracing_open_generic_tr(struct inode *inode, struct file *filp);
 bool tracing_is_disabled(void);
 bool tracer_tracing_is_on(struct trace_array *tr);
 void tracer_tracing_on(struct trace_array *tr);
index a41fed46c285c3f83726b184b723b4a9e3dd772b..89779eb84a0778ac052e8d1c7300cf4dabdb09e0 100644 (file)
@@ -174,6 +174,10 @@ static int dyn_event_open(struct inode *inode, struct file *file)
 {
        int ret;
 
+       ret = tracing_check_open_get_tr(NULL);
+       if (ret)
+               return ret;
+
        if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
                ret = dyn_events_release_all(NULL);
                if (ret < 0)
index 0892e38ed6fbeca73258ce357d678c922e48468d..a9dfa04ffa4499117833495aded1db5fdc19eedd 100644 (file)
@@ -272,9 +272,11 @@ int perf_kprobe_init(struct perf_event *p_event, bool is_retprobe)
                goto out;
        }
 
+       mutex_lock(&event_mutex);
        ret = perf_trace_event_init(tp_event, p_event);
        if (ret)
                destroy_local_trace_kprobe(tp_event);
+       mutex_unlock(&event_mutex);
 out:
        kfree(func);
        return ret;
@@ -282,8 +284,10 @@ out:
 
 void perf_kprobe_destroy(struct perf_event *p_event)
 {
+       mutex_lock(&event_mutex);
        perf_trace_event_close(p_event);
        perf_trace_event_unreg(p_event);
+       mutex_unlock(&event_mutex);
 
        destroy_local_trace_kprobe(p_event->tp_event);
 }
index b89cdfe20bc1626b1c4632c6bfcace5eedbfe1ef..fba87d10f0c1af1ab9367a981667cc452efd4707 100644 (file)
@@ -12,6 +12,7 @@
 #define pr_fmt(fmt) fmt
 
 #include <linux/workqueue.h>
+#include <linux/security.h>
 #include <linux/spinlock.h>
 #include <linux/kthread.h>
 #include <linux/tracefs.h>
@@ -1294,6 +1295,8 @@ static int trace_format_open(struct inode *inode, struct file *file)
        struct seq_file *m;
        int ret;
 
+       /* Do we want to hide event format files on tracefs lockdown? */
+
        ret = seq_open(file, &trace_format_seq_ops);
        if (ret < 0)
                return ret;
@@ -1440,28 +1443,17 @@ static int system_tr_open(struct inode *inode, struct file *filp)
        struct trace_array *tr = inode->i_private;
        int ret;
 
-       if (tracing_is_disabled())
-               return -ENODEV;
-
-       if (trace_array_get(tr) < 0)
-               return -ENODEV;
-
        /* Make a temporary dir that has no system but points to tr */
        dir = kzalloc(sizeof(*dir), GFP_KERNEL);
-       if (!dir) {
-               trace_array_put(tr);
+       if (!dir)
                return -ENOMEM;
-       }
 
-       dir->tr = tr;
-
-       ret = tracing_open_generic(inode, filp);
+       ret = tracing_open_generic_tr(inode, filp);
        if (ret < 0) {
-               trace_array_put(tr);
                kfree(dir);
                return ret;
        }
-
+       dir->tr = tr;
        filp->private_data = dir;
 
        return 0;
@@ -1771,6 +1763,10 @@ ftrace_event_open(struct inode *inode, struct file *file,
        struct seq_file *m;
        int ret;
 
+       ret = security_locked_down(LOCKDOWN_TRACEFS);
+       if (ret)
+               return ret;
+
        ret = seq_open(file, seq_ops);
        if (ret < 0)
                return ret;
@@ -1795,6 +1791,7 @@ ftrace_event_avail_open(struct inode *inode, struct file *file)
 {
        const struct seq_operations *seq_ops = &show_event_seq_ops;
 
+       /* Checks for tracefs lockdown */
        return ftrace_event_open(inode, file, seq_ops);
 }
 
@@ -1805,8 +1802,9 @@ ftrace_event_set_open(struct inode *inode, struct file *file)
        struct trace_array *tr = inode->i_private;
        int ret;
 
-       if (trace_array_get(tr) < 0)
-               return -ENODEV;
+       ret = tracing_check_open_get_tr(tr);
+       if (ret)
+               return ret;
 
        if ((file->f_mode & FMODE_WRITE) &&
            (file->f_flags & O_TRUNC))
@@ -1825,8 +1823,9 @@ ftrace_event_set_pid_open(struct inode *inode, struct file *file)
        struct trace_array *tr = inode->i_private;
        int ret;
 
-       if (trace_array_get(tr) < 0)
-               return -ENODEV;
+       ret = tracing_check_open_get_tr(tr);
+       if (ret)
+               return ret;
 
        if ((file->f_mode & FMODE_WRITE) &&
            (file->f_flags & O_TRUNC))
index 9468bd8d44a2d687939b83f081b4bcf5051fd54d..7482a1466ebfede5ce50e458d5b80cc2c6a18951 100644 (file)
@@ -7,6 +7,7 @@
 
 #include <linux/module.h>
 #include <linux/kallsyms.h>
+#include <linux/security.h>
 #include <linux/mutex.h>
 #include <linux/slab.h>
 #include <linux/stacktrace.h>
@@ -678,6 +679,8 @@ static bool synth_field_signed(char *type)
 {
        if (str_has_prefix(type, "u"))
                return false;
+       if (strcmp(type, "gfp_t") == 0)
+               return false;
 
        return true;
 }
@@ -1448,6 +1451,10 @@ static int synth_events_open(struct inode *inode, struct file *file)
 {
        int ret;
 
+       ret = security_locked_down(LOCKDOWN_TRACEFS);
+       if (ret)
+               return ret;
+
        if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
                ret = dyn_events_release_all(&synth_event_ops);
                if (ret < 0)
@@ -1680,7 +1687,7 @@ static int save_hist_vars(struct hist_trigger_data *hist_data)
        if (var_data)
                return 0;
 
-       if (trace_array_get(tr) < 0)
+       if (tracing_check_open_get_tr(tr))
                return -ENODEV;
 
        var_data = kzalloc(sizeof(*var_data), GFP_KERNEL);
@@ -5515,6 +5522,12 @@ static int hist_show(struct seq_file *m, void *v)
 
 static int event_hist_open(struct inode *inode, struct file *file)
 {
+       int ret;
+
+       ret = security_locked_down(LOCKDOWN_TRACEFS);
+       if (ret)
+               return ret;
+
        return single_open(file, hist_show, file);
 }
 
index 2a2912cb4533fef2ba7170c7f8ddbf25676bc40d..2cd53ca21b515f620873572845a8e591ae44a968 100644 (file)
@@ -5,6 +5,7 @@
  * Copyright (C) 2013 Tom Zanussi <tom.zanussi@linux.intel.com>
  */
 
+#include <linux/security.h>
 #include <linux/module.h>
 #include <linux/ctype.h>
 #include <linux/mutex.h>
@@ -173,7 +174,11 @@ static const struct seq_operations event_triggers_seq_ops = {
 
 static int event_trigger_regex_open(struct inode *inode, struct file *file)
 {
-       int ret = 0;
+       int ret;
+
+       ret = security_locked_down(LOCKDOWN_TRACEFS);
+       if (ret)
+               return ret;
 
        mutex_lock(&event_mutex);
 
@@ -292,6 +297,7 @@ event_trigger_write(struct file *filp, const char __user *ubuf,
 static int
 event_trigger_open(struct inode *inode, struct file *filp)
 {
+       /* Checks for tracefs lockdown */
        return event_trigger_regex_open(inode, filp);
 }
 
index fa95139445b28609b14544342ac6f4c88f3f56bd..862f4b0139fcb33bf0902fcd635c9bd6f440522c 100644 (file)
@@ -150,7 +150,7 @@ void trace_hwlat_callback(bool enter)
                if (enter)
                        nmi_ts_start = time_get();
                else
-                       nmi_total_ts = time_get() - nmi_ts_start;
+                       nmi_total_ts += time_get() - nmi_ts_start;
        }
 
        if (enter)
@@ -256,6 +256,8 @@ static int get_sample(void)
                /* Keep a running maximum ever recorded hardware latency */
                if (sample > tr->max_latency)
                        tr->max_latency = sample;
+               if (outer_sample > tr->max_latency)
+                       tr->max_latency = outer_sample;
        }
 
 out:
index 324ffbea35567d5167511bac40aefbfa51b1268c..1552a95c743bfbbc70451501cdd3b4763036ede3 100644 (file)
@@ -7,11 +7,11 @@
  */
 #define pr_fmt(fmt)    "trace_kprobe: " fmt
 
+#include <linux/security.h>
 #include <linux/module.h>
 #include <linux/uaccess.h>
 #include <linux/rculist.h>
 #include <linux/error-injection.h>
-#include <linux/security.h>
 
 #include <asm/setup.h>  /* for COMMAND_LINE_SIZE */
 
@@ -936,6 +936,10 @@ static int probes_open(struct inode *inode, struct file *file)
 {
        int ret;
 
+       ret = security_locked_down(LOCKDOWN_TRACEFS);
+       if (ret)
+               return ret;
+
        if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
                ret = dyn_events_release_all(&trace_kprobe_ops);
                if (ret < 0)
@@ -988,6 +992,12 @@ static const struct seq_operations profile_seq_op = {
 
 static int profile_open(struct inode *inode, struct file *file)
 {
+       int ret;
+
+       ret = security_locked_down(LOCKDOWN_TRACEFS);
+       if (ret)
+               return ret;
+
        return seq_open(file, &profile_seq_op);
 }
 
index c3fd849d4a8f93c27d18713ae1e492faa5d24ec9..d4e31e96920650fef131562dda2ff5cc52b39fbb 100644 (file)
@@ -6,6 +6,7 @@
  *
  */
 #include <linux/seq_file.h>
+#include <linux/security.h>
 #include <linux/uaccess.h>
 #include <linux/kernel.h>
 #include <linux/ftrace.h>
@@ -348,6 +349,12 @@ static const struct seq_operations show_format_seq_ops = {
 static int
 ftrace_formats_open(struct inode *inode, struct file *file)
 {
+       int ret;
+
+       ret = security_locked_down(LOCKDOWN_TRACEFS);
+       if (ret)
+               return ret;
+
        return seq_open(file, &show_format_seq_ops);
 }
 
index ec9a34a97129230e5dea84f88b583871f993f6ea..4df9a209f7caf2aaf213c7a45e317dd0c79338f2 100644 (file)
@@ -5,6 +5,7 @@
  */
 #include <linux/sched/task_stack.h>
 #include <linux/stacktrace.h>
+#include <linux/security.h>
 #include <linux/kallsyms.h>
 #include <linux/seq_file.h>
 #include <linux/spinlock.h>
@@ -470,6 +471,12 @@ static const struct seq_operations stack_trace_seq_ops = {
 
 static int stack_trace_open(struct inode *inode, struct file *file)
 {
+       int ret;
+
+       ret = security_locked_down(LOCKDOWN_TRACEFS);
+       if (ret)
+               return ret;
+
        return seq_open(file, &stack_trace_seq_ops);
 }
 
@@ -487,6 +494,7 @@ stack_trace_filter_open(struct inode *inode, struct file *file)
 {
        struct ftrace_ops *ops = inode->i_private;
 
+       /* Checks for tracefs lockdown */
        return ftrace_regex_open(ops, FTRACE_ITER_FILTER,
                                 inode, file);
 }
index 75bf1bcb4a8a56164641549f51bbec41584afe24..9ab0a1a7ad5ee042dff2e25b3485204cea2e844d 100644 (file)
@@ -9,7 +9,7 @@
  *
  */
 
-
+#include <linux/security.h>
 #include <linux/list.h>
 #include <linux/slab.h>
 #include <linux/rbtree.h>
@@ -238,6 +238,10 @@ static int tracing_stat_open(struct inode *inode, struct file *file)
        struct seq_file *m;
        struct stat_session *session = inode->i_private;
 
+       ret = security_locked_down(LOCKDOWN_TRACEFS);
+       if (ret)
+               return ret;
+
        ret = stat_seq_init(session);
        if (ret)
                return ret;
index dd884341f5c57aec35dbc54e7649044a7a19eb24..352073d36585ade088c3610725a1af273f4fb5a8 100644 (file)
@@ -7,6 +7,7 @@
  */
 #define pr_fmt(fmt)    "trace_uprobe: " fmt
 
+#include <linux/security.h>
 #include <linux/ctype.h>
 #include <linux/module.h>
 #include <linux/uaccess.h>
@@ -769,6 +770,10 @@ static int probes_open(struct inode *inode, struct file *file)
 {
        int ret;
 
+       ret = security_locked_down(LOCKDOWN_TRACEFS);
+       if (ret)
+               return ret;
+
        if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
                ret = dyn_events_release_all(&trace_uprobe_ops);
                if (ret)
@@ -818,6 +823,12 @@ static const struct seq_operations profile_seq_op = {
 
 static int profile_open(struct inode *inode, struct file *file)
 {
+       int ret;
+
+       ret = security_locked_down(LOCKDOWN_TRACEFS);
+       if (ret)
+               return ret;
+
        return seq_open(file, &profile_seq_op);
 }
 
index 5cff72f18c4a4ce4f0770f968dadefd4e6d190ad..33ffbf30885394c89d2c8c933eeb50a7769c357e 100644 (file)
@@ -106,7 +106,12 @@ retry:
                was_locked = 1;
        } else {
                local_irq_restore(flags);
-               cpu_relax();
+               /*
+                * Wait for the lock to release before jumping to
+                * atomic_cmpxchg() in order to mitigate the thundering herd
+                * problem.
+                */
+               do { cpu_relax(); } while (atomic_read(&dump_lock) != -1);
                goto retry;
        }
 
index ae25e2fa2187bc0ec9a90691c008fe4b53a3c0cf..f25eb111c0516f0191477662a1a88211caca053e 100644 (file)
@@ -2,6 +2,7 @@
 #include <linux/export.h>
 #include <linux/generic-radix-tree.h>
 #include <linux/gfp.h>
+#include <linux/kmemleak.h>
 
 #define GENRADIX_ARY           (PAGE_SIZE / sizeof(struct genradix_node *))
 #define GENRADIX_ARY_SHIFT     ilog2(GENRADIX_ARY)
@@ -75,6 +76,27 @@ void *__genradix_ptr(struct __genradix *radix, size_t offset)
 }
 EXPORT_SYMBOL(__genradix_ptr);
 
+static inline struct genradix_node *genradix_alloc_node(gfp_t gfp_mask)
+{
+       struct genradix_node *node;
+
+       node = (struct genradix_node *)__get_free_page(gfp_mask|__GFP_ZERO);
+
+       /*
+        * We're using pages (not slab allocations) directly for kernel data
+        * structures, so we need to explicitly inform kmemleak of them in order
+        * to avoid false positive memory leak reports.
+        */
+       kmemleak_alloc(node, PAGE_SIZE, 1, gfp_mask);
+       return node;
+}
+
+static inline void genradix_free_node(struct genradix_node *node)
+{
+       kmemleak_free(node);
+       free_page((unsigned long)node);
+}
+
 /*
  * Returns pointer to the specified byte @offset within @radix, allocating it if
  * necessary - newly allocated slots are always zeroed out:
@@ -97,8 +119,7 @@ void *__genradix_ptr_alloc(struct __genradix *radix, size_t offset,
                        break;
 
                if (!new_node) {
-                       new_node = (void *)
-                               __get_free_page(gfp_mask|__GFP_ZERO);
+                       new_node = genradix_alloc_node(gfp_mask);
                        if (!new_node)
                                return NULL;
                }
@@ -121,8 +142,7 @@ void *__genradix_ptr_alloc(struct __genradix *radix, size_t offset,
                n = READ_ONCE(*p);
                if (!n) {
                        if (!new_node) {
-                               new_node = (void *)
-                                       __get_free_page(gfp_mask|__GFP_ZERO);
+                               new_node = genradix_alloc_node(gfp_mask);
                                if (!new_node)
                                        return NULL;
                        }
@@ -133,7 +153,7 @@ void *__genradix_ptr_alloc(struct __genradix *radix, size_t offset,
        }
 
        if (new_node)
-               free_page((unsigned long) new_node);
+               genradix_free_node(new_node);
 
        return &n->data[offset];
 }
@@ -191,7 +211,7 @@ static void genradix_free_recurse(struct genradix_node *n, unsigned level)
                                genradix_free_recurse(n->children[i], level - 1);
        }
 
-       free_page((unsigned long) n);
+       genradix_free_node(n);
 }
 
 int __genradix_prealloc(struct __genradix *radix, size_t size,
index cd7a10c192109967ebefb4f123a4cf5f9c7b4e3a..08ec58cc673b5a732594cfc728083e6541db1475 100644 (file)
@@ -748,27 +748,6 @@ void *memset(void *s, int c, size_t count)
 EXPORT_SYMBOL(memset);
 #endif
 
-/**
- * memzero_explicit - Fill a region of memory (e.g. sensitive
- *                   keying data) with 0s.
- * @s: Pointer to the start of the area.
- * @count: The size of the area.
- *
- * Note: usually using memset() is just fine (!), but in cases
- * where clearing out _local_ data at the end of a scope is
- * necessary, memzero_explicit() should be used instead in
- * order to prevent the compiler from optimising away zeroing.
- *
- * memzero_explicit() doesn't need an arch-specific version as
- * it just invokes the one of memset() implicitly.
- */
-void memzero_explicit(void *s, size_t count)
-{
-       memset(s, 0, count);
-       barrier_data(s);
-}
-EXPORT_SYMBOL(memzero_explicit);
-
 #ifndef __HAVE_ARCH_MEMSET16
 /**
  * memset16() - Fill a memory area with a uint16_t
index 28ff554a1be867fde4da4ecb2cd1631633f75f34..6c0005d5dd5c43e7ed4097c24d5fc785a794110e 100644 (file)
@@ -3,16 +3,10 @@
 #include <linux/export.h>
 #include <linux/uaccess.h>
 #include <linux/mm.h>
+#include <linux/bitops.h>
 
 #include <asm/word-at-a-time.h>
 
-/* Set bits in the first 'n' bytes when loaded from memory */
-#ifdef __LITTLE_ENDIAN
-#  define aligned_byte_mask(n) ((1ul << 8*(n))-1)
-#else
-#  define aligned_byte_mask(n) (~0xfful << (BITS_PER_LONG - 8 - 8*(n)))
-#endif
-
 /*
  * Do a strnlen, return length of string *with* final '\0'.
  * 'count' is the user-supplied count, while 'max' is the
index 9729f271d15041ac1930eb85922b81ba094a574f..9742e5cb853aa18313d533b17f73d85dc17d50e5 100644 (file)
@@ -297,6 +297,32 @@ out:
        return 1;
 }
 
+static int __init do_kmem_cache_size_bulk(int size, int *total_failures)
+{
+       struct kmem_cache *c;
+       int i, iter, maxiter = 1024;
+       int num, bytes;
+       bool fail = false;
+       void *objects[10];
+
+       c = kmem_cache_create("test_cache", size, size, 0, NULL);
+       for (iter = 0; (iter < maxiter) && !fail; iter++) {
+               num = kmem_cache_alloc_bulk(c, GFP_KERNEL, ARRAY_SIZE(objects),
+                                           objects);
+               for (i = 0; i < num; i++) {
+                       bytes = count_nonzero_bytes(objects[i], size);
+                       if (bytes)
+                               fail = true;
+                       fill_with_garbage(objects[i], size);
+               }
+
+               if (num)
+                       kmem_cache_free_bulk(c, num, objects);
+       }
+       *total_failures += fail;
+       return 1;
+}
+
 /*
  * Test kmem_cache allocation by creating caches of different sizes, with and
  * without constructors, with and without SLAB_TYPESAFE_BY_RCU.
@@ -318,6 +344,7 @@ static int __init test_kmemcache(int *total_failures)
                        num_tests += do_kmem_cache_size(size, ctor, rcu, zero,
                                                        &failures);
                }
+               num_tests += do_kmem_cache_size_bulk(size, &failures);
        }
        REPORT_FAILURES_IN_FN();
        *total_failures += failures;
index 67bcd5dfd847a651150f2f52d6c69f561c15f557..5ff04d8fe9716354bb2d2f60117a65dc8481c880 100644 (file)
 # define TEST_U64
 #endif
 
-#define test(condition, msg)           \
-({                                     \
-       int cond = (condition);         \
-       if (cond)                       \
-               pr_warn("%s\n", msg);   \
-       cond;                           \
+#define test(condition, msg, ...)                                      \
+({                                                                     \
+       int cond = (condition);                                         \
+       if (cond)                                                       \
+               pr_warn("[%d] " msg "\n", __LINE__, ##__VA_ARGS__);     \
+       cond;                                                           \
 })
 
+static bool is_zeroed(void *from, size_t size)
+{
+       return memchr_inv(from, 0x0, size) == NULL;
+}
+
+static int test_check_nonzero_user(char *kmem, char __user *umem, size_t size)
+{
+       int ret = 0;
+       size_t start, end, i, zero_start, zero_end;
+
+       if (test(size < 2 * PAGE_SIZE, "buffer too small"))
+               return -EINVAL;
+
+       /*
+        * We want to cross a page boundary to exercise the code more
+        * effectively. We also don't want to make the size we scan too large,
+        * otherwise the test can take a long time and cause soft lockups. So
+        * scan a 1024 byte region across the page boundary.
+        */
+       size = 1024;
+       start = PAGE_SIZE - (size / 2);
+
+       kmem += start;
+       umem += start;
+
+       zero_start = size / 4;
+       zero_end = size - zero_start;
+
+       /*
+        * We conduct a series of check_nonzero_user() tests on a block of
+        * memory with the following byte-pattern (trying every possible
+        * [start,end] pair):
+        *
+        *   [ 00 ff 00 ff ... 00 00 00 00 ... ff 00 ff 00 ]
+        *
+        * And we verify that check_nonzero_user() acts identically to
+        * memchr_inv().
+        */
+
+       memset(kmem, 0x0, size);
+       for (i = 1; i < zero_start; i += 2)
+               kmem[i] = 0xff;
+       for (i = zero_end; i < size; i += 2)
+               kmem[i] = 0xff;
+
+       ret |= test(copy_to_user(umem, kmem, size),
+                   "legitimate copy_to_user failed");
+
+       for (start = 0; start <= size; start++) {
+               for (end = start; end <= size; end++) {
+                       size_t len = end - start;
+                       int retval = check_zeroed_user(umem + start, len);
+                       int expected = is_zeroed(kmem + start, len);
+
+                       ret |= test(retval != expected,
+                                   "check_nonzero_user(=%d) != memchr_inv(=%d) mismatch (start=%zu, end=%zu)",
+                                   retval, expected, start, end);
+               }
+       }
+
+       return ret;
+}
+
+static int test_copy_struct_from_user(char *kmem, char __user *umem,
+                                     size_t size)
+{
+       int ret = 0;
+       char *umem_src = NULL, *expected = NULL;
+       size_t ksize, usize;
+
+       umem_src = kmalloc(size, GFP_KERNEL);
+       ret = test(umem_src == NULL, "kmalloc failed");
+       if (ret)
+               goto out_free;
+
+       expected = kmalloc(size, GFP_KERNEL);
+       ret = test(expected == NULL, "kmalloc failed");
+       if (ret)
+               goto out_free;
+
+       /* Fill umem with a fixed byte pattern. */
+       memset(umem_src, 0x3e, size);
+       ret |= test(copy_to_user(umem, umem_src, size),
+                   "legitimate copy_to_user failed");
+
+       /* Check basic case -- (usize == ksize). */
+       ksize = size;
+       usize = size;
+
+       memcpy(expected, umem_src, ksize);
+
+       memset(kmem, 0x0, size);
+       ret |= test(copy_struct_from_user(kmem, ksize, umem, usize),
+                   "copy_struct_from_user(usize == ksize) failed");
+       ret |= test(memcmp(kmem, expected, ksize),
+                   "copy_struct_from_user(usize == ksize) gives unexpected copy");
+
+       /* Old userspace case -- (usize < ksize). */
+       ksize = size;
+       usize = size / 2;
+
+       memcpy(expected, umem_src, usize);
+       memset(expected + usize, 0x0, ksize - usize);
+
+       memset(kmem, 0x0, size);
+       ret |= test(copy_struct_from_user(kmem, ksize, umem, usize),
+                   "copy_struct_from_user(usize < ksize) failed");
+       ret |= test(memcmp(kmem, expected, ksize),
+                   "copy_struct_from_user(usize < ksize) gives unexpected copy");
+
+       /* New userspace (-E2BIG) case -- (usize > ksize). */
+       ksize = size / 2;
+       usize = size;
+
+       memset(kmem, 0x0, size);
+       ret |= test(copy_struct_from_user(kmem, ksize, umem, usize) != -E2BIG,
+                   "copy_struct_from_user(usize > ksize) didn't give E2BIG");
+
+       /* New userspace (success) case -- (usize > ksize). */
+       ksize = size / 2;
+       usize = size;
+
+       memcpy(expected, umem_src, ksize);
+       ret |= test(clear_user(umem + ksize, usize - ksize),
+                   "legitimate clear_user failed");
+
+       memset(kmem, 0x0, size);
+       ret |= test(copy_struct_from_user(kmem, ksize, umem, usize),
+                   "copy_struct_from_user(usize > ksize) failed");
+       ret |= test(memcmp(kmem, expected, ksize),
+                   "copy_struct_from_user(usize > ksize) gives unexpected copy");
+
+out_free:
+       kfree(expected);
+       kfree(umem_src);
+       return ret;
+}
+
 static int __init test_user_copy_init(void)
 {
        int ret = 0;
@@ -106,6 +244,11 @@ static int __init test_user_copy_init(void)
 #endif
 #undef test_legit
 
+       /* Test usage of check_nonzero_user(). */
+       ret |= test_check_nonzero_user(kmem, usermem, 2 * PAGE_SIZE);
+       /* Test usage of copy_struct_from_user(). */
+       ret |= test_copy_struct_from_user(kmem, usermem, 2 * PAGE_SIZE);
+
        /*
         * Invalid usage: none of these copies should succeed.
         */
index 4f16eec5d5544e8846d6545d4eb386712c52fb5f..f68dea8806be2770662f8cabecd4fcf2660eed62 100644 (file)
@@ -89,9 +89,9 @@
  *       goto errout;
  *   }
  *
- *   pos = textsearch_find_continuous(conf, \&state, example, strlen(example));
+ *   pos = textsearch_find_continuous(conf, &state, example, strlen(example));
  *   if (pos != UINT_MAX)
- *       panic("Oh my god, dancing chickens at \%d\n", pos);
+ *       panic("Oh my god, dancing chickens at %d\n", pos);
  *
  *   textsearch_destroy(conf);
  */
index c2bfbcaeb3dc5bb9dc2a1efd4b30acd2e0ad6f90..cbb4d9ec00f207a8156cab8a4aa52fb8a49ff43e 100644 (file)
@@ -1,5 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 #include <linux/uaccess.h>
+#include <linux/bitops.h>
 
 /* out-of-line parts */
 
@@ -31,3 +32,57 @@ unsigned long _copy_to_user(void __user *to, const void *from, unsigned long n)
 }
 EXPORT_SYMBOL(_copy_to_user);
 #endif
+
+/**
+ * check_zeroed_user: check if a userspace buffer only contains zero bytes
+ * @from: Source address, in userspace.
+ * @size: Size of buffer.
+ *
+ * This is effectively shorthand for "memchr_inv(from, 0, size) == NULL" for
+ * userspace addresses (and is more efficient because we don't care where the
+ * first non-zero byte is).
+ *
+ * Returns:
+ *  * 0: There were non-zero bytes present in the buffer.
+ *  * 1: The buffer was full of zero bytes.
+ *  * -EFAULT: access to userspace failed.
+ */
+int check_zeroed_user(const void __user *from, size_t size)
+{
+       unsigned long val;
+       uintptr_t align = (uintptr_t) from % sizeof(unsigned long);
+
+       if (unlikely(size == 0))
+               return 1;
+
+       from -= align;
+       size += align;
+
+       if (!user_access_begin(from, size))
+               return -EFAULT;
+
+       unsafe_get_user(val, (unsigned long __user *) from, err_fault);
+       if (align)
+               val &= ~aligned_byte_mask(align);
+
+       while (size > sizeof(unsigned long)) {
+               if (unlikely(val))
+                       goto done;
+
+               from += sizeof(unsigned long);
+               size -= sizeof(unsigned long);
+
+               unsafe_get_user(val, (unsigned long __user *) from, err_fault);
+       }
+
+       if (size < sizeof(unsigned long))
+               val &= aligned_byte_mask(size);
+
+done:
+       user_access_end();
+       return (val == 0);
+err_fault:
+       user_access_end();
+       return -EFAULT;
+}
+EXPORT_SYMBOL(check_zeroed_user);
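
check_zeroed_user() is the building block used by copy_struct_from_user() to verify that the bytes of a userspace struct which the kernel does not know about are all zero. As a rough illustration of the intended calling pattern for a size-versioned struct, a minimal sketch follows; the struct and syscall names are hypothetical and chosen only for illustration:

struct foo_args {
        __u64 flags;
        __u64 addr;
        /* new fields may be appended in later kernel versions */
};

SYSCALL_DEFINE2(foo, const void __user *, uargs, size_t, usize)
{
        struct foo_args args;
        int err;

        /*
         * Copies min(usize, sizeof(args)) bytes from userspace. If the
         * user's struct is larger than the kernel's, the extra trailing
         * bytes must be zero (verified via check_zeroed_user()),
         * otherwise -E2BIG is returned. If it is smaller, the remaining
         * kernel fields are zero-filled.
         */
        err = copy_struct_from_user(&args, sizeof(args), uargs, usize);
        if (err)
                return err;

        /* ... operate on args ... */
        return 0;
}
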
index cc00364bd2c28610cb4fe9ae282c95e4895f7b03..9fe698ff62ec4ffcaaf6c9e838c358bc31a57bca 100644 (file)
@@ -24,13 +24,4 @@ config GENERIC_COMPAT_VDSO
        help
          This config option enables the compat VDSO layer.
 
-config CROSS_COMPILE_COMPAT_VDSO
-       string "32 bit Toolchain prefix for compat vDSO"
-       default ""
-       depends on GENERIC_COMPAT_VDSO
-       help
-         Defines the cross-compiler prefix for compiling compat vDSO.
-         If a 64 bit compiler (i.e. x86_64) can compile the VDSO for
-         32 bit, it does not need to define this parameter.
-
 endif
index e630e7ff57f1f9102e28820ef98f9c49e23da2ed..45f57fd2db649999741f4c2b9d375635453b53db 100644 (file)
@@ -214,9 +214,10 @@ int __cvdso_clock_getres_common(clockid_t clock, struct __kernel_timespec *res)
                return -1;
        }
 
-       res->tv_sec = 0;
-       res->tv_nsec = ns;
-
+       if (likely(res)) {
+               res->tv_sec = 0;
+               res->tv_nsec = ns;
+       }
        return 0;
 }
 
@@ -245,7 +246,7 @@ __cvdso_clock_getres_time32(clockid_t clock, struct old_timespec32 *res)
                ret = clock_getres_fallback(clock, &ts);
 #endif
 
-       if (likely(!ret)) {
+       if (likely(!ret && res)) {
                res->tv_sec = ts.tv_sec;
                res->tv_nsec = ts.tv_nsec;
        }
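
The res checks added above matter because POSIX permits clock_getres() to be called with a NULL resolution pointer, while the removed lines show the vDSO fast path storing to res unconditionally. A trivial userspace sketch of such a legal call:

#include <time.h>

int main(void)
{
        /* Per POSIX, a NULL res is valid: only the return value matters. */
        return clock_getres(CLOCK_MONOTONIC, NULL);
}
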
index d9daa3e422d05debb676e2014746e93346f17028..c360f6a6c8443fe6a78d325fefd2d6c45338e38c 100644 (file)
@@ -239,8 +239,8 @@ static int __init default_bdi_init(void)
 {
        int err;
 
-       bdi_wq = alloc_workqueue("writeback", WQ_MEM_RECLAIM | WQ_FREEZABLE |
-                                             WQ_UNBOUND | WQ_SYSFS, 0);
+       bdi_wq = alloc_workqueue("writeback", WQ_MEM_RECLAIM | WQ_UNBOUND |
+                                WQ_SYSFS, 0);
        if (!bdi_wq)
                return -ENOMEM;
 
index ce08b39d85d40f50011111b5ec0621f253ac30a8..672d3c78c6abfcfa152b95113701982fac8221e2 100644 (file)
@@ -270,14 +270,15 @@ __reset_isolation_pfn(struct zone *zone, unsigned long pfn, bool check_source,
 
        /* Ensure the start of the pageblock or zone is online and valid */
        block_pfn = pageblock_start_pfn(pfn);
-       block_page = pfn_to_online_page(max(block_pfn, zone->zone_start_pfn));
+       block_pfn = max(block_pfn, zone->zone_start_pfn);
+       block_page = pfn_to_online_page(block_pfn);
        if (block_page) {
                page = block_page;
                pfn = block_pfn;
        }
 
        /* Ensure the end of the pageblock or zone is online and valid */
-       block_pfn += pageblock_nr_pages;
+       block_pfn = pageblock_end_pfn(pfn) - 1;
        block_pfn = min(block_pfn, zone_end_pfn(zone) - 1);
        end_page = pfn_to_online_page(block_pfn);
        if (!end_page)
@@ -303,7 +304,7 @@ __reset_isolation_pfn(struct zone *zone, unsigned long pfn, bool check_source,
 
                page += (1 << PAGE_ALLOC_COSTLY_ORDER);
                pfn += (1 << PAGE_ALLOC_COSTLY_ORDER);
-       } while (page < end_page);
+       } while (page <= end_page);
 
        return false;
 }
index 1146fcfa321511b61e5dd258c9825b6bf759426e..85b7d087eb45a769fd88cbbd15d3647f7702baf7 100644 (file)
@@ -40,6 +40,7 @@
 #include <linux/rmap.h>
 #include <linux/delayacct.h>
 #include <linux/psi.h>
+#include <linux/ramfs.h>
 #include "internal.h"
 
 #define CREATE_TRACE_POINTS
index 23a9f9c9d377543f9ad28d8e935a896ecbb95caa..8f236a335ae9d25a81665f282f7ef9ccba7802dc 100644 (file)
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1973,7 +1973,8 @@ static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end,
 }
 
 static int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
-                      unsigned long end, int write, struct page **pages, int *nr)
+                      unsigned long end, unsigned int flags,
+                      struct page **pages, int *nr)
 {
        unsigned long pte_end;
        struct page *head, *page;
@@ -1986,7 +1987,7 @@ static int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
 
        pte = READ_ONCE(*ptep);
 
-       if (!pte_access_permitted(pte, write))
+       if (!pte_access_permitted(pte, flags & FOLL_WRITE))
                return 0;
 
        /* hugepages are never "special" */
@@ -2023,7 +2024,7 @@ static int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
 }
 
 static int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
-               unsigned int pdshift, unsigned long end, int write,
+               unsigned int pdshift, unsigned long end, unsigned int flags,
                struct page **pages, int *nr)
 {
        pte_t *ptep;
@@ -2033,7 +2034,7 @@ static int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
        ptep = hugepte_offset(hugepd, addr, pdshift);
        do {
                next = hugepte_addr_end(addr, end, sz);
-               if (!gup_hugepte(ptep, sz, addr, end, write, pages, nr))
+               if (!gup_hugepte(ptep, sz, addr, end, flags, pages, nr))
                        return 0;
        } while (ptep++, addr = next, addr != end);
 
@@ -2041,7 +2042,7 @@ static int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
 }
 #else
 static inline int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
-               unsigned pdshift, unsigned long end, int write,
+               unsigned int pdshift, unsigned long end, unsigned int flags,
                struct page **pages, int *nr)
 {
        return 0;
@@ -2049,7 +2050,8 @@ static inline int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
 #endif /* CONFIG_ARCH_HAS_HUGEPD */
 
 static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
-               unsigned long end, unsigned int flags, struct page **pages, int *nr)
+                       unsigned long end, unsigned int flags,
+                       struct page **pages, int *nr)
 {
        struct page *head, *page;
        int refs;
index c5cb6dcd6c69664c4e9c71d02c20ecae53362e9b..13cc93785006fa03289b8b7318e9e3bf1fd7b056 100644 (file)
@@ -2789,8 +2789,13 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
                        ds_queue->split_queue_len--;
                        list_del(page_deferred_list(head));
                }
-               if (mapping)
-                       __dec_node_page_state(page, NR_SHMEM_THPS);
+               if (mapping) {
+                       if (PageSwapBacked(page))
+                               __dec_node_page_state(page, NR_SHMEM_THPS);
+                       else
+                               __dec_node_page_state(page, NR_FILE_THPS);
+               }
+
                spin_unlock(&ds_queue->split_queue_lock);
                __split_huge_page(page, list, end, flags);
                if (PageSwapCache(head)) {
index ef37c85423a5209c528dbbb3e08f48a8bfcb5009..b45a95363a844a12f54eb904437f3fd562057b50 100644 (file)
@@ -1084,11 +1084,10 @@ static bool pfn_range_valid_gigantic(struct zone *z,
        struct page *page;
 
        for (i = start_pfn; i < end_pfn; i++) {
-               if (!pfn_valid(i))
+               page = pfn_to_online_page(i);
+               if (!page)
                        return false;
 
-               page = pfn_to_page(i);
-
                if (page_zone(page) != z)
                        return false;
 
index fb1e15028ef06b6ff5b68a125df7e434d52787b8..19603302a77ffa9745861183d1afeacd547e0730 100644 (file)
@@ -5,6 +5,7 @@
 #include <linux/spinlock.h>
 #include <linux/list.h>
 #include <linux/cpumask.h>
+#include <linux/mman.h>
 
 #include <linux/atomic.h>
 #include <linux/user_namespace.h>
index 0a1b4b484ac5b4a0eed5e5148f04849d9e09607b..f05d27b7183dcd9699427e19fdc1b8cd00642028 100644 (file)
@@ -1028,12 +1028,13 @@ static void collapse_huge_page(struct mm_struct *mm,
 
        anon_vma_lock_write(vma->anon_vma);
 
-       pte = pte_offset_map(pmd, address);
-       pte_ptl = pte_lockptr(mm, pmd);
-
        mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL, mm,
                                address, address + HPAGE_PMD_SIZE);
        mmu_notifier_invalidate_range_start(&range);
+
+       pte = pte_offset_map(pmd, address);
+       pte_ptl = pte_lockptr(mm, pmd);
+
        pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
        /*
         * After this gup_fast can't run anymore. This also removes
index 03a8d84badada040c3b8e16200e244ef9ba10094..244607663363141d170f50e0b736d9fdc20c712c 100644 (file)
@@ -526,6 +526,16 @@ static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
        return object;
 }
 
+/*
+ * Remove an object from the object_tree_root and object_list. Must be called
+ * with the kmemleak_lock held _if_ kmemleak is still enabled.
+ */
+static void __remove_object(struct kmemleak_object *object)
+{
+       rb_erase(&object->rb_node, &object_tree_root);
+       list_del_rcu(&object->object_list);
+}
+
 /*
  * Look up an object in the object search tree and remove it from both
  * object_tree_root and object_list. The returned object's use_count should be
@@ -538,10 +548,8 @@ static struct kmemleak_object *find_and_remove_object(unsigned long ptr, int ali
 
        write_lock_irqsave(&kmemleak_lock, flags);
        object = lookup_object(ptr, alias);
-       if (object) {
-               rb_erase(&object->rb_node, &object_tree_root);
-               list_del_rcu(&object->object_list);
-       }
+       if (object)
+               __remove_object(object);
        write_unlock_irqrestore(&kmemleak_lock, flags);
 
        return object;
@@ -1834,12 +1842,16 @@ static const struct file_operations kmemleak_fops = {
 
 static void __kmemleak_do_cleanup(void)
 {
-       struct kmemleak_object *object;
+       struct kmemleak_object *object, *tmp;
 
-       rcu_read_lock();
-       list_for_each_entry_rcu(object, &object_list, object_list)
-               delete_object_full(object->pointer);
-       rcu_read_unlock();
+       /*
+        * Kmemleak has already been disabled, no need for RCU list traversal
+        * or kmemleak_lock held.
+        */
+       list_for_each_entry_safe(object, tmp, &object_list, object_list) {
+               __remove_object(object);
+               __delete_object(object);
+       }
 }
 
 /*
index 7d4f61ae666a61123bea4b59b9b02f90ba401f3a..c4b16cae2bc9bdfec99120b14478a4c46ae94627 100644 (file)
@@ -1356,9 +1356,6 @@ static phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
                align = SMP_CACHE_BYTES;
        }
 
-       if (end > memblock.current_limit)
-               end = memblock.current_limit;
-
 again:
        found = memblock_find_in_range_node(size, align, start, end, nid,
                                            flags);
@@ -1469,6 +1466,9 @@ static void * __init memblock_alloc_internal(
        if (WARN_ON_ONCE(slab_is_available()))
                return kzalloc_node(size, GFP_NOWAIT, nid);
 
+       if (max_addr > memblock.current_limit)
+               max_addr = memblock.current_limit;
+
        alloc = memblock_alloc_range_nid(size, align, min_addr, max_addr, nid);
 
        /* retry allocation without lower limit */
index c313c49074cad4916e7aa6b74968c548690a451d..37592dd7ae3264ba4bf8cc94c63910f56585aa5b 100644 (file)
@@ -484,7 +484,7 @@ ino_t page_cgroup_ino(struct page *page)
        unsigned long ino = 0;
 
        rcu_read_lock();
-       if (PageHead(page) && PageSlab(page))
+       if (PageSlab(page) && !PageTail(page))
                memcg = memcg_from_slab_page(page);
        else
                memcg = READ_ONCE(page->mem_cgroup);
@@ -1567,6 +1567,11 @@ unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg)
        return max;
 }
 
+unsigned long mem_cgroup_size(struct mem_cgroup *memcg)
+{
+       return page_counter_read(&memcg->memory);
+}
+
 static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
                                     int order)
 {
@@ -2529,6 +2534,15 @@ retry:
                goto retry;
        }
 
+       /*
+        * Memcg doesn't have a dedicated reserve for atomic
+        * allocations. But like the global atomic pool, we need to
+        * put the burden of reclaim on regular allocation requests
+        * and let these go through as privileged allocations.
+        */
+       if (gfp_mask & __GFP_ATOMIC)
+               goto force;
+
        /*
         * Unlike in global OOM situations, memcg is not in a physical
         * memory shortage.  Allow dying and OOM-killed tasks to
@@ -5009,12 +5023,6 @@ static void __mem_cgroup_free(struct mem_cgroup *memcg)
 {
        int node;
 
-       /*
-        * Flush percpu vmstats and vmevents to guarantee the value correctness
-        * on parent's and all ancestor levels.
-        */
-       memcg_flush_percpu_vmstats(memcg, false);
-       memcg_flush_percpu_vmevents(memcg);
        for_each_node(node)
                free_mem_cgroup_per_node_info(memcg, node);
        free_percpu(memcg->vmstats_percpu);
@@ -5025,6 +5033,12 @@ static void __mem_cgroup_free(struct mem_cgroup *memcg)
 static void mem_cgroup_free(struct mem_cgroup *memcg)
 {
        memcg_wb_domain_exit(memcg);
+       /*
+        * Flush percpu vmstats and vmevents to guarantee the value correctness
+        * on parent's and all ancestor levels.
+        */
+       memcg_flush_percpu_vmstats(memcg, false);
+       memcg_flush_percpu_vmevents(memcg);
        __mem_cgroup_free(memcg);
 }
 
@@ -5415,6 +5429,8 @@ static int mem_cgroup_move_account(struct page *page,
                                   struct mem_cgroup *from,
                                   struct mem_cgroup *to)
 {
+       struct lruvec *from_vec, *to_vec;
+       struct pglist_data *pgdat;
        unsigned long flags;
        unsigned int nr_pages = compound ? hpage_nr_pages(page) : 1;
        int ret;
@@ -5438,11 +5454,15 @@ static int mem_cgroup_move_account(struct page *page,
 
        anon = PageAnon(page);
 
+       pgdat = page_pgdat(page);
+       from_vec = mem_cgroup_lruvec(pgdat, from);
+       to_vec = mem_cgroup_lruvec(pgdat, to);
+
        spin_lock_irqsave(&from->move_lock, flags);
 
        if (!anon && page_mapped(page)) {
-               __mod_memcg_state(from, NR_FILE_MAPPED, -nr_pages);
-               __mod_memcg_state(to, NR_FILE_MAPPED, nr_pages);
+               __mod_lruvec_state(from_vec, NR_FILE_MAPPED, -nr_pages);
+               __mod_lruvec_state(to_vec, NR_FILE_MAPPED, nr_pages);
        }
 
        /*
@@ -5454,14 +5474,14 @@ static int mem_cgroup_move_account(struct page *page,
                struct address_space *mapping = page_mapping(page);
 
                if (mapping_cap_account_dirty(mapping)) {
-                       __mod_memcg_state(from, NR_FILE_DIRTY, -nr_pages);
-                       __mod_memcg_state(to, NR_FILE_DIRTY, nr_pages);
+                       __mod_lruvec_state(from_vec, NR_FILE_DIRTY, -nr_pages);
+                       __mod_lruvec_state(to_vec, NR_FILE_DIRTY, nr_pages);
                }
        }
 
        if (PageWriteback(page)) {
-               __mod_memcg_state(from, NR_WRITEBACK, -nr_pages);
-               __mod_memcg_state(to, NR_WRITEBACK, nr_pages);
+               __mod_lruvec_state(from_vec, NR_WRITEBACK, -nr_pages);
+               __mod_lruvec_state(to_vec, NR_WRITEBACK, nr_pages);
        }
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
index 7ef849da8278c1ce659f4930c5df17fbe7e2f7d5..3151c87dff73e68feceb37225a99db991e7a5130 100644 (file)
@@ -199,7 +199,6 @@ struct to_kill {
        struct task_struct *tsk;
        unsigned long addr;
        short size_shift;
-       char addr_valid;
 };
 
 /*
@@ -324,22 +323,27 @@ static void add_to_kill(struct task_struct *tsk, struct page *p,
                }
        }
        tk->addr = page_address_in_vma(p, vma);
-       tk->addr_valid = 1;
        if (is_zone_device_page(p))
                tk->size_shift = dev_pagemap_mapping_shift(p, vma);
        else
                tk->size_shift = compound_order(compound_head(p)) + PAGE_SHIFT;
 
        /*
-        * In theory we don't have to kill when the page was
-        * munmaped. But it could be also a mremap. Since that's
-        * likely very rare kill anyways just out of paranoia, but use
-        * a SIGKILL because the error is not contained anymore.
+        * Send SIGKILL if "tk->addr == -EFAULT". Also, since
+        * "tk->size_shift" is always non-zero for !is_zone_device_page(),
+        * "tk->size_shift == 0" effectively checks for no mapping on
+        * ZONE_DEVICE. Indeed, when a devdax page is mmapped N times
+        * to a process' address space, it's possible not all N VMAs
+        * contain mappings for the page, but at least one VMA does.
+        * Only deliver SIGBUS with payload derived from the VMA that
+        * has a mapping for the page.
         */
-       if (tk->addr == -EFAULT || tk->size_shift == 0) {
+       if (tk->addr == -EFAULT) {
                pr_info("Memory failure: Unable to find user space address %lx in %s\n",
                        page_to_pfn(p), tsk->comm);
-               tk->addr_valid = 0;
+       } else if (tk->size_shift == 0) {
+               kfree(tk);
+               return;
        }
        get_task_struct(tsk);
        tk->tsk = tsk;
@@ -366,7 +370,7 @@ static void kill_procs(struct list_head *to_kill, int forcekill, bool fail,
                         * make sure the process doesn't catch the
                         * signal and then access the memory. Just kill it.
                         */
-                       if (fail || tk->addr_valid == 0) {
+                       if (fail || tk->addr == -EFAULT) {
                                pr_err("Memory failure: %#lx: forcibly killing %s:%d because of failure to unmap corrupted page\n",
                                       pfn, tk->tsk->comm, tk->tsk->pid);
                                do_send_sig_info(SIGKILL, SEND_SIG_PRIV,
@@ -1253,17 +1257,19 @@ int memory_failure(unsigned long pfn, int flags)
        if (!sysctl_memory_failure_recovery)
                panic("Memory failure on page %lx", pfn);
 
-       if (!pfn_valid(pfn)) {
+       p = pfn_to_online_page(pfn);
+       if (!p) {
+               if (pfn_valid(pfn)) {
+                       pgmap = get_dev_pagemap(pfn, NULL);
+                       if (pgmap)
+                               return memory_failure_dev_pagemap(pfn, flags,
+                                                                 pgmap);
+               }
                pr_err("Memory failure: %#lx: memory outside kernel control\n",
                        pfn);
                return -ENXIO;
        }
 
-       pgmap = get_dev_pagemap(pfn, NULL);
-       if (pgmap)
-               return memory_failure_dev_pagemap(pfn, flags, pgmap);
-
-       p = pfn_to_page(pfn);
        if (PageHuge(p))
                return memory_failure_hugetlb(pfn, flags);
        if (TestSetPageHWPoison(p)) {
index b1be791f772dc0fae4d0eef4bedc45f4312fb454..07e5c67f48a85c9c67391a4e6b0a6e3f48466d3b 100644 (file)
@@ -436,67 +436,33 @@ static void shrink_zone_span(struct zone *zone, unsigned long start_pfn,
        zone_span_writeunlock(zone);
 }
 
-static void shrink_pgdat_span(struct pglist_data *pgdat,
-                             unsigned long start_pfn, unsigned long end_pfn)
+static void update_pgdat_span(struct pglist_data *pgdat)
 {
-       unsigned long pgdat_start_pfn = pgdat->node_start_pfn;
-       unsigned long p = pgdat_end_pfn(pgdat); /* pgdat_end_pfn namespace clash */
-       unsigned long pgdat_end_pfn = p;
-       unsigned long pfn;
-       int nid = pgdat->node_id;
-
-       if (pgdat_start_pfn == start_pfn) {
-               /*
-                * If the section is smallest section in the pgdat, it need
-                * shrink pgdat->node_start_pfn and pgdat->node_spanned_pages.
-                * In this case, we find second smallest valid mem_section
-                * for shrinking zone.
-                */
-               pfn = find_smallest_section_pfn(nid, NULL, end_pfn,
-                                               pgdat_end_pfn);
-               if (pfn) {
-                       pgdat->node_start_pfn = pfn;
-                       pgdat->node_spanned_pages = pgdat_end_pfn - pfn;
-               }
-       } else if (pgdat_end_pfn == end_pfn) {
-               /*
-                * If the section is biggest section in the pgdat, it need
-                * shrink pgdat->node_spanned_pages.
-                * In this case, we find second biggest valid mem_section for
-                * shrinking zone.
-                */
-               pfn = find_biggest_section_pfn(nid, NULL, pgdat_start_pfn,
-                                              start_pfn);
-               if (pfn)
-                       pgdat->node_spanned_pages = pfn - pgdat_start_pfn + 1;
-       }
+       unsigned long node_start_pfn = 0, node_end_pfn = 0;
+       struct zone *zone;
 
-       /*
-        * If the section is not biggest or smallest mem_section in the pgdat,
-        * it only creates a hole in the pgdat. So in this case, we need not
-        * change the pgdat.
-        * But perhaps, the pgdat has only hole data. Thus it check the pgdat
-        * has only hole or not.
-        */
-       pfn = pgdat_start_pfn;
-       for (; pfn < pgdat_end_pfn; pfn += PAGES_PER_SUBSECTION) {
-               if (unlikely(!pfn_valid(pfn)))
-                       continue;
+       for (zone = pgdat->node_zones;
+            zone < pgdat->node_zones + MAX_NR_ZONES; zone++) {
+               unsigned long zone_end_pfn = zone->zone_start_pfn +
+                                            zone->spanned_pages;
 
-               if (pfn_to_nid(pfn) != nid)
+               /* No need to lock the zones, they can't change. */
+               if (!zone->spanned_pages)
                        continue;
-
-               /* Skip range to be removed */
-               if (pfn >= start_pfn && pfn < end_pfn)
+               if (!node_end_pfn) {
+                       node_start_pfn = zone->zone_start_pfn;
+                       node_end_pfn = zone_end_pfn;
                        continue;
+               }
 
-               /* If we find valid section, we have nothing to do */
-               return;
+               if (zone_end_pfn > node_end_pfn)
+                       node_end_pfn = zone_end_pfn;
+               if (zone->zone_start_pfn < node_start_pfn)
+                       node_start_pfn = zone->zone_start_pfn;
        }
 
-       /* The pgdat has no valid section */
-       pgdat->node_start_pfn = 0;
-       pgdat->node_spanned_pages = 0;
+       pgdat->node_start_pfn = node_start_pfn;
+       pgdat->node_spanned_pages = node_end_pfn - node_start_pfn;
 }
 
 static void __remove_zone(struct zone *zone, unsigned long start_pfn,
@@ -507,7 +473,7 @@ static void __remove_zone(struct zone *zone, unsigned long start_pfn,
 
        pgdat_resize_lock(zone->zone_pgdat, &flags);
        shrink_zone_span(zone, start_pfn, start_pfn + nr_pages);
-       shrink_pgdat_span(pgdat, start_pfn, start_pfn + nr_pages);
+       update_pgdat_span(pgdat);
        pgdat_resize_unlock(zone->zone_pgdat, &flags);
 }
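
The rewritten update_pgdat_span() above no longer probes individual pfns; the node span is simply the union of its populated zones' spans, and a node whose zones are all empty keeps a zero span. A rough user-space sketch of that min/max computation (structs and numbers below are invented, not the kernel's):

/* Minimal sketch of the span computation in update_pgdat_span().
 * The zone layout here is illustrative only. */
#include <stdio.h>

struct zone_span { unsigned long start_pfn, spanned_pages; };

int main(void)
{
	struct zone_span zones[] = {
		{ .start_pfn = 0x1000, .spanned_pages = 0x1000 },	/* DMA    */
		{ .start_pfn = 0,      .spanned_pages = 0 },		/* empty  */
		{ .start_pfn = 0x8000, .spanned_pages = 0x4000 },	/* Normal */
	};
	unsigned long node_start_pfn = 0, node_end_pfn = 0;

	for (unsigned i = 0; i < sizeof(zones) / sizeof(zones[0]); i++) {
		unsigned long end = zones[i].start_pfn + zones[i].spanned_pages;

		if (!zones[i].spanned_pages)
			continue;		/* empty zones contribute nothing */
		if (!node_end_pfn) {		/* first populated zone seeds the span */
			node_start_pfn = zones[i].start_pfn;
			node_end_pfn = end;
			continue;
		}
		if (end > node_end_pfn)
			node_end_pfn = end;
		if (zones[i].start_pfn < node_start_pfn)
			node_start_pfn = zones[i].start_pfn;
	}

	printf("node spans pfn %#lx..%#lx (%lu pages)\n",
	       node_start_pfn, node_end_pfn, node_end_pfn - node_start_pfn);
	return 0;
}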
 
index 32c79b51af8678bead9702a1447b7def15050ad9..03ccbdfeb6972018eea805d5848b4a9b3a3ba4cd 100644 (file)
@@ -13,8 +13,6 @@
 #include <linux/xarray.h>
 
 static DEFINE_XARRAY(pgmap_array);
-#define SECTION_MASK ~((1UL << PA_SECTION_SHIFT) - 1)
-#define SECTION_SIZE (1UL << PA_SECTION_SHIFT)
 
 #ifdef CONFIG_DEV_PAGEMAP_OPS
 DEFINE_STATIC_KEY_FALSE(devmap_managed_key);
@@ -105,6 +103,7 @@ static void dev_pagemap_cleanup(struct dev_pagemap *pgmap)
 void memunmap_pages(struct dev_pagemap *pgmap)
 {
        struct resource *res = &pgmap->res;
+       struct page *first_page;
        unsigned long pfn;
        int nid;
 
@@ -113,14 +112,16 @@ void memunmap_pages(struct dev_pagemap *pgmap)
                put_page(pfn_to_page(pfn));
        dev_pagemap_cleanup(pgmap);
 
+       /* make sure to access a memmap that was actually initialized */
+       first_page = pfn_to_page(pfn_first(pgmap));
+
        /* pages are dead and unused, undo the arch mapping */
-       nid = page_to_nid(pfn_to_page(PHYS_PFN(res->start)));
+       nid = page_to_nid(first_page);
 
        mem_hotplug_begin();
        if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
-               pfn = PHYS_PFN(res->start);
-               __remove_pages(page_zone(pfn_to_page(pfn)), pfn,
-                                PHYS_PFN(resource_size(res)), NULL);
+               __remove_pages(page_zone(first_page), PHYS_PFN(res->start),
+                              PHYS_PFN(resource_size(res)), NULL);
        } else {
                arch_remove_memory(nid, res->start, resource_size(res),
                                pgmap_altmap(pgmap));
index 7fde88695f35d62c1f22ffe1cc724ffe85ea92e9..9a889e456168bcc72957dc79fe4c7c234243aa73 100644 (file)
@@ -180,7 +180,7 @@ int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
                                        mn->ops->invalidate_range_start, _ret,
                                        !mmu_notifier_range_blockable(range) ? "non-" : "");
                                WARN_ON(mmu_notifier_range_blockable(range) ||
-                                       ret != -EAGAIN);
+                                       _ret != -EAGAIN);
                                ret = _ret;
                        }
                }
index 15c2050c629b1d8aacb2f36aac7ac09c54c95449..f391c0c4ed1deb2299a7552dcd27c8d68e4b1412 100644 (file)
@@ -1175,11 +1175,17 @@ static __always_inline bool free_pages_prepare(struct page *page,
                debug_check_no_obj_freed(page_address(page),
                                           PAGE_SIZE << order);
        }
-       arch_free_page(page, order);
        if (want_init_on_free())
                kernel_init_free_pages(page, 1 << order);
 
        kernel_poison_pages(page, 1 << order, 0);
+       /*
+        * arch_free_page() can make the page's contents inaccessible.  s390
+        * does this.  So nothing which can access the page's contents should
+        * happen after this.
+        */
+       arch_free_page(page, order);
+
        if (debug_pagealloc_enabled())
                kernel_map_pages(page, 1 << order, 0);
 
@@ -1941,6 +1947,14 @@ void __init page_alloc_init_late(void)
        /* Block until all are initialised */
        wait_for_completion(&pgdat_init_all_done_comp);
 
+       /*
+        * The number of managed pages has changed due to the initialisation
+        * so the pcpu batch and high limits need to be updated or the limits
+        * will be artificially small.
+        */
+       for_each_populated_zone(zone)
+               zone_pcp_update(zone);
+
        /*
         * We initialized the rest of the deferred pages.  Permanently disable
         * on-demand struct page initialization.
@@ -3714,10 +3728,6 @@ try_this_zone:
 static void warn_alloc_show_mem(gfp_t gfp_mask, nodemask_t *nodemask)
 {
        unsigned int filter = SHOW_MEM_FILTER_NODES;
-       static DEFINE_RATELIMIT_STATE(show_mem_rs, HZ, 1);
-
-       if (!__ratelimit(&show_mem_rs))
-               return;
 
        /*
         * This documents exceptions given to allocations in certain
@@ -3738,8 +3748,7 @@ void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...)
 {
        struct va_format vaf;
        va_list args;
-       static DEFINE_RATELIMIT_STATE(nopage_rs, DEFAULT_RATELIMIT_INTERVAL,
-                                     DEFAULT_RATELIMIT_BURST);
+       static DEFINE_RATELIMIT_STATE(nopage_rs, 10*HZ, 1);
 
        if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs))
                return;
@@ -4467,12 +4476,14 @@ retry_cpuset:
                if (page)
                        goto got_pg;
 
-                if (order >= pageblock_order && (gfp_mask & __GFP_IO)) {
+                if (order >= pageblock_order && (gfp_mask & __GFP_IO) &&
+                    !(gfp_mask & __GFP_RETRY_MAYFAIL)) {
                        /*
                         * If allocating entire pageblock(s) and compaction
                         * failed because all zones are below low watermarks
                         * or is prohibited because it recently failed at this
-                        * order, fail immediately.
+                        * order, fail immediately unless the allocator has
+                        * requested compaction and reclaim retry.
                         *
                         * Reclaim is
                         *  - potentially very expensive because zones are far
@@ -8506,7 +8517,6 @@ void free_contig_range(unsigned long pfn, unsigned int nr_pages)
        WARN(count != 0, "%d pages are still in use!\n", count);
 }
 
-#ifdef CONFIG_MEMORY_HOTPLUG
 /*
  * The zone indicated has a new number of managed_pages; batch sizes and percpu
  * page high values need to be recalculated.
@@ -8520,7 +8530,6 @@ void __meminit zone_pcp_update(struct zone *zone)
                                per_cpu_ptr(zone->pageset, cpu));
        mutex_unlock(&pcp_batch_high_lock);
 }
-#endif
 
 void zone_pcp_reset(struct zone *zone)
 {
index 5f5769c7db3b2f87b78ebc0b07425edecc7d0846..4ade843ff58846cc9f73d3cb54ce78849643ca71 100644 (file)
@@ -67,8 +67,9 @@ static struct page_ext_operations *page_ext_ops[] = {
 #endif
 };
 
+unsigned long page_ext_size = sizeof(struct page_ext);
+
 static unsigned long total_usage;
-static unsigned long extra_mem;
 
 static bool __init invoke_need_callbacks(void)
 {
@@ -78,9 +79,8 @@ static bool __init invoke_need_callbacks(void)
 
        for (i = 0; i < entries; i++) {
                if (page_ext_ops[i]->need && page_ext_ops[i]->need()) {
-                       page_ext_ops[i]->offset = sizeof(struct page_ext) +
-                                               extra_mem;
-                       extra_mem += page_ext_ops[i]->size;
+                       page_ext_ops[i]->offset = page_ext_size;
+                       page_ext_size += page_ext_ops[i]->size;
                        need = true;
                }
        }
@@ -99,14 +99,9 @@ static void __init invoke_init_callbacks(void)
        }
 }
 
-static unsigned long get_entry_size(void)
-{
-       return sizeof(struct page_ext) + extra_mem;
-}
-
 static inline struct page_ext *get_entry(void *base, unsigned long index)
 {
-       return base + get_entry_size() * index;
+       return base + page_ext_size * index;
 }
 
 #if !defined(CONFIG_SPARSEMEM)
@@ -156,7 +151,7 @@ static int __init alloc_node_page_ext(int nid)
                !IS_ALIGNED(node_end_pfn(nid), MAX_ORDER_NR_PAGES))
                nr_pages += MAX_ORDER_NR_PAGES;
 
-       table_size = get_entry_size() * nr_pages;
+       table_size = page_ext_size * nr_pages;
 
        base = memblock_alloc_try_nid(
                        table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
@@ -234,7 +229,7 @@ static int __meminit init_section_page_ext(unsigned long pfn, int nid)
        if (section->page_ext)
                return 0;
 
-       table_size = get_entry_size() * PAGES_PER_SECTION;
+       table_size = page_ext_size * PAGES_PER_SECTION;
        base = alloc_page_ext(table_size, nid);
 
        /*
@@ -254,7 +249,7 @@ static int __meminit init_section_page_ext(unsigned long pfn, int nid)
         * we need to apply a mask.
         */
        pfn &= PAGE_SECTION_MASK;
-       section->page_ext = (void *)base - get_entry_size() * pfn;
+       section->page_ext = (void *)base - page_ext_size * pfn;
        total_usage += table_size;
        return 0;
 }
@@ -267,7 +262,7 @@ static void free_page_ext(void *addr)
                struct page *page = virt_to_page(addr);
                size_t table_size;
 
-               table_size = get_entry_size() * PAGES_PER_SECTION;
+               table_size = page_ext_size * PAGES_PER_SECTION;
 
                BUG_ON(PageReserved(page));
                kmemleak_free(addr);
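
The page_ext rework above replaces the repeated sizeof(struct page_ext) + extra_mem computation with a single exported page_ext_size: each enabled client records the running total as its offset before adding its own size, and entry i of a section lives at base + page_ext_size * i. A small sketch of that bookkeeping, with made-up client sizes:

/* Sketch of the page_ext offset/size bookkeeping; client sizes are invented. */
#include <stdio.h>
#include <stddef.h>

struct page_ext { unsigned long flags; };

struct page_ext_ops { size_t size; size_t offset; };

int main(void)
{
	struct page_ext_ops clients[] = {
		{ .size = 24 },		/* e.g. a page_owner-like client   */
		{ .size = 8 },		/* e.g. an idle-tracking-like client */
	};
	size_t page_ext_size = sizeof(struct page_ext);

	/* Mirrors invoke_need_callbacks(): each enabled client is placed
	 * right after everything registered before it. */
	for (unsigned i = 0; i < 2; i++) {
		clients[i].offset = page_ext_size;
		page_ext_size += clients[i].size;
	}

	/* Mirrors get_entry(): entry i of a section starts at
	 * base + page_ext_size * i, and a client's data sits at
	 * entry + its offset. */
	for (unsigned i = 0; i < 2; i++)
		printf("client %u: offset %zu within a %zu-byte entry\n",
		       i, clients[i].offset, page_ext_size);
	return 0;
}
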
index dee931184788399c920646cef75e29e8fd347186..18ecde9f45b24c069172045ef2f829fc94a1133f 100644 (file)
@@ -24,12 +24,10 @@ struct page_owner {
        short last_migrate_reason;
        gfp_t gfp_mask;
        depot_stack_handle_t handle;
-#ifdef CONFIG_DEBUG_PAGEALLOC
        depot_stack_handle_t free_handle;
-#endif
 };
 
-static bool page_owner_disabled = true;
+static bool page_owner_enabled = false;
 DEFINE_STATIC_KEY_FALSE(page_owner_inited);
 
 static depot_stack_handle_t dummy_handle;
@@ -44,7 +42,7 @@ static int __init early_page_owner_param(char *buf)
                return -EINVAL;
 
        if (strcmp(buf, "on") == 0)
-               page_owner_disabled = false;
+               page_owner_enabled = true;
 
        return 0;
 }
@@ -52,10 +50,7 @@ early_param("page_owner", early_page_owner_param);
 
 static bool need_page_owner(void)
 {
-       if (page_owner_disabled)
-               return false;
-
-       return true;
+       return page_owner_enabled;
 }
 
 static __always_inline depot_stack_handle_t create_dummy_stack(void)
@@ -84,7 +79,7 @@ static noinline void register_early_stack(void)
 
 static void init_page_owner(void)
 {
-       if (page_owner_disabled)
+       if (!page_owner_enabled)
                return;
 
        register_dummy_stack();
@@ -148,25 +143,19 @@ void __reset_page_owner(struct page *page, unsigned int order)
 {
        int i;
        struct page_ext *page_ext;
-#ifdef CONFIG_DEBUG_PAGEALLOC
        depot_stack_handle_t handle = 0;
        struct page_owner *page_owner;
 
-       if (debug_pagealloc_enabled())
-               handle = save_stack(GFP_NOWAIT | __GFP_NOWARN);
-#endif
+       handle = save_stack(GFP_NOWAIT | __GFP_NOWARN);
 
+       page_ext = lookup_page_ext(page);
+       if (unlikely(!page_ext))
+               return;
        for (i = 0; i < (1 << order); i++) {
-               page_ext = lookup_page_ext(page + i);
-               if (unlikely(!page_ext))
-                       continue;
-               __clear_bit(PAGE_EXT_OWNER_ACTIVE, &page_ext->flags);
-#ifdef CONFIG_DEBUG_PAGEALLOC
-               if (debug_pagealloc_enabled()) {
-                       page_owner = get_page_owner(page_ext);
-                       page_owner->free_handle = handle;
-               }
-#endif
+               __clear_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags);
+               page_owner = get_page_owner(page_ext);
+               page_owner->free_handle = handle;
+               page_ext = page_ext_next(page_ext);
        }
 }
 
@@ -184,9 +173,9 @@ static inline void __set_page_owner_handle(struct page *page,
                page_owner->gfp_mask = gfp_mask;
                page_owner->last_migrate_reason = -1;
                __set_bit(PAGE_EXT_OWNER, &page_ext->flags);
-               __set_bit(PAGE_EXT_OWNER_ACTIVE, &page_ext->flags);
+               __set_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags);
 
-               page_ext = lookup_page_ext(page + i);
+               page_ext = page_ext_next(page_ext);
        }
 }
 
@@ -224,12 +213,10 @@ void __split_page_owner(struct page *page, unsigned int order)
        if (unlikely(!page_ext))
                return;
 
-       page_owner = get_page_owner(page_ext);
-       page_owner->order = 0;
-       for (i = 1; i < (1 << order); i++) {
-               page_ext = lookup_page_ext(page + i);
+       for (i = 0; i < (1 << order); i++) {
                page_owner = get_page_owner(page_ext);
                page_owner->order = 0;
+               page_ext = page_ext_next(page_ext);
        }
 }
 
@@ -260,7 +247,7 @@ void __copy_page_owner(struct page *oldpage, struct page *newpage)
         * the new page, which will be freed.
         */
        __set_bit(PAGE_EXT_OWNER, &new_ext->flags);
-       __set_bit(PAGE_EXT_OWNER_ACTIVE, &new_ext->flags);
+       __set_bit(PAGE_EXT_OWNER_ALLOCATED, &new_ext->flags);
 }
 
 void pagetypeinfo_showmixedcount_print(struct seq_file *m,
@@ -284,7 +271,8 @@ void pagetypeinfo_showmixedcount_print(struct seq_file *m,
         * not matter as the mixed block count will still be correct
         */
        for (; pfn < end_pfn; ) {
-               if (!pfn_valid(pfn)) {
+               page = pfn_to_online_page(pfn);
+               if (!page) {
                        pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);
                        continue;
                }
@@ -292,13 +280,13 @@ void pagetypeinfo_showmixedcount_print(struct seq_file *m,
                block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
                block_end_pfn = min(block_end_pfn, end_pfn);
 
-               page = pfn_to_page(pfn);
                pageblock_mt = get_pageblock_migratetype(page);
 
                for (; pfn < block_end_pfn; pfn++) {
                        if (!pfn_valid_within(pfn))
                                continue;
 
+                       /* The pageblock is online, no need to recheck. */
                        page = pfn_to_page(pfn);
 
                        if (page_zone(page) != zone)
@@ -320,7 +308,7 @@ void pagetypeinfo_showmixedcount_print(struct seq_file *m,
                        if (unlikely(!page_ext))
                                continue;
 
-                       if (!test_bit(PAGE_EXT_OWNER_ACTIVE, &page_ext->flags))
+                       if (!test_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags))
                                continue;
 
                        page_owner = get_page_owner(page_ext);
@@ -435,7 +423,7 @@ void __dump_page_owner(struct page *page)
                return;
        }
 
-       if (test_bit(PAGE_EXT_OWNER_ACTIVE, &page_ext->flags))
+       if (test_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags))
                pr_alert("page_owner tracks the page as allocated\n");
        else
                pr_alert("page_owner tracks the page as freed\n");
@@ -451,7 +439,6 @@ void __dump_page_owner(struct page *page)
                stack_trace_print(entries, nr_entries, 0);
        }
 
-#ifdef CONFIG_DEBUG_PAGEALLOC
        handle = READ_ONCE(page_owner->free_handle);
        if (!handle) {
                pr_alert("page_owner free stack trace missing\n");
@@ -460,7 +447,6 @@ void __dump_page_owner(struct page *page)
                pr_alert("page last free stack trace:\n");
                stack_trace_print(entries, nr_entries, 0);
        }
-#endif
 
        if (page_owner->last_migrate_reason != -1)
                pr_alert("page has been migrated, last migrate reason: %s\n",
@@ -527,7 +513,7 @@ read_page_owner(struct file *file, char __user *buf, size_t count, loff_t *ppos)
                 * Although we do have the info about past allocation of free
                 * pages, it's not relevant for current memory usage.
                 */
-               if (!test_bit(PAGE_EXT_OWNER_ACTIVE, &page_ext->flags))
+               if (!test_bit(PAGE_EXT_OWNER_ALLOCATED, &page_ext->flags))
                        continue;
 
                page_owner = get_page_owner(page_ext);
index d9a23bb773bf7997773cb94952fbf73c313a0bf3..0c7b2a9400d4a286799d76381f8188250bf72528 100644 (file)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -61,6 +61,7 @@
 #include <linux/mmu_notifier.h>
 #include <linux/migrate.h>
 #include <linux/hugetlb.h>
+#include <linux/huge_mm.h>
 #include <linux/backing-dev.h>
 #include <linux/page_idle.h>
 #include <linux/memremap.h>
index cd570cc79c76ab9873ce123dd7fd1d0e4412c0bc..220be9fa2c417c4cfccacde8b799d975ac730f06 100644 (file)
@@ -3482,6 +3482,12 @@ static int shmem_parse_options(struct fs_context *fc, void *data)
 {
        char *options = data;
 
+       if (options) {
+               int err = security_sb_eat_lsm_opts(options, &fc->security);
+               if (err)
+                       return err;
+       }
+
        while (options != NULL) {
                char *this_char = options;
                for (;;) {
index 3ce12481b1dccfb01112ca9b168a442cc81d5d4f..b3fe97fd665418adeb0b2d75419b5bd425db945e 100644 (file)
@@ -33,7 +33,7 @@ __meminit void page_alloc_shuffle(enum mm_shuffle_ctl ctl)
 }
 
 static bool shuffle_param;
-extern int shuffle_show(char *buffer, const struct kernel_param *kp)
+static int shuffle_show(char *buffer, const struct kernel_param *kp)
 {
        return sprintf(buffer, "%c\n", test_bit(SHUFFLE_ENABLE, &shuffle_state)
                        ? 'Y' : 'N');
index 9df370558e5d25fbfb073e58c666b72bcaf8a165..66e5d8032bae4a046cd2b30293b5bf7edadad485 100644 (file)
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -4206,9 +4206,12 @@ void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
 
 /**
  * __ksize -- Uninstrumented ksize.
+ * @objp: pointer to the object
  *
  * Unlike ksize(), __ksize() is uninstrumented, and does not provide the same
  * safety checks as ksize() with KASAN instrumentation enabled.
+ *
+ * Return: size of the actual memory used by @objp in bytes
  */
 size_t __ksize(const void *objp)
 {
index 68e455f2b698cbfab04ac4a1d61df3c1a32cfe9e..b2b01694dc43f63259fa3ee1e1b59ef3505f794b 100644 (file)
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -323,8 +323,8 @@ static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
  * Expects a pointer to a slab page. Please note, that PageSlab() check
  * isn't sufficient, as it returns true also for tail compound slab pages,
  * which do not have slab_cache pointer set.
- * So this function assumes that the page can pass PageHead() and PageSlab()
- * checks.
+ * So this function assumes that the page can pass PageSlab() && !PageTail()
+ * check.
  *
  * The kmem_cache can be reparented asynchronously. The caller must ensure
  * the memcg lifetime, e.g. by taking rcu_read_lock() or cgroup_mutex.
index 6491c3a418053870ae600830f82fa7f72e16a58d..f9fb27b4c84398c49eee6b9a46e006d5c2c28f95 100644 (file)
@@ -178,10 +178,13 @@ static int init_memcg_params(struct kmem_cache *s,
 
 static void destroy_memcg_params(struct kmem_cache *s)
 {
-       if (is_root_cache(s))
+       if (is_root_cache(s)) {
                kvfree(rcu_access_pointer(s->memcg_params.memcg_caches));
-       else
+       } else {
+               mem_cgroup_put(s->memcg_params.memcg);
+               WRITE_ONCE(s->memcg_params.memcg, NULL);
                percpu_ref_exit(&s->memcg_params.refcnt);
+       }
 }
 
 static void free_memcg_params(struct rcu_head *rcu)
@@ -253,8 +256,6 @@ static void memcg_unlink_cache(struct kmem_cache *s)
        } else {
                list_del(&s->memcg_params.children_node);
                list_del(&s->memcg_params.kmem_caches_node);
-               mem_cgroup_put(s->memcg_params.memcg);
-               WRITE_ONCE(s->memcg_params.memcg, NULL);
        }
 }
 #else
@@ -1030,10 +1031,19 @@ void __init create_boot_cache(struct kmem_cache *s, const char *name,
                unsigned int useroffset, unsigned int usersize)
 {
        int err;
+       unsigned int align = ARCH_KMALLOC_MINALIGN;
 
        s->name = name;
        s->size = s->object_size = size;
-       s->align = calculate_alignment(flags, ARCH_KMALLOC_MINALIGN, size);
+
+       /*
+        * For power of two sizes, guarantee natural alignment for kmalloc
+        * caches, regardless of SL*B debugging options.
+        */
+       if (is_power_of_2(size))
+               align = max(align, size);
+       s->align = calculate_alignment(flags, align, size);
+
        s->useroffset = useroffset;
        s->usersize = usersize;
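
The create_boot_cache() change above is meant to give kmalloc caches with power-of-two object sizes natural alignment (a 256-byte object on a 256-byte boundary) regardless of SL*B debug options. A simplified user-space illustration of the resulting alignment rule; the 8-byte minimum stands in for ARCH_KMALLOC_MINALIGN, and the flag handling in calculate_alignment() is omitted:

/* Illustration of the "natural alignment for power-of-two sizes" rule. */
#include <stdio.h>
#include <stdbool.h>

static bool is_power_of_2(unsigned long n)
{
	return n != 0 && (n & (n - 1)) == 0;
}

static unsigned long kmalloc_align(unsigned long size)
{
	unsigned long align = 8;	/* assumed ARCH_KMALLOC_MINALIGN */

	if (is_power_of_2(size))
		align = align > size ? align : size;
	return align;
}

int main(void)
{
	unsigned long sizes[] = { 8, 96, 192, 256, 1024 };

	/* 96 and 192 keep the minimum; 256 and 1024 become naturally aligned. */
	for (unsigned i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("kmalloc-%lu -> alignment %lu\n",
		       sizes[i], kmalloc_align(sizes[i]));
	return 0;
}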
 
@@ -1287,12 +1297,16 @@ void __init create_kmalloc_caches(slab_flags_t flags)
  */
 void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
 {
-       void *ret;
+       void *ret = NULL;
        struct page *page;
 
        flags |= __GFP_COMP;
        page = alloc_pages(flags, order);
-       ret = page ? page_address(page) : NULL;
+       if (likely(page)) {
+               ret = page_address(page);
+               mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE,
+                                   1 << order);
+       }
        ret = kasan_kmalloc_large(ret, size, flags);
        /* As ret might get tagged, call kmemleak hook after KASAN. */
        kmemleak_alloc(ret, size, 1, flags);
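
The mod_node_page_state() call added here (and mirrored in the slob and slub hunks below) makes page-order kmalloc allocations visible in NR_SLAB_UNRECLAIMABLE: 1 << order pages are added when the pages are obtained and subtracted again when they are freed. A trivial sketch of that pairing; the plain counter stands in for the per-node stat:

/* Sketch of the page-count bookkeeping for large kmalloc allocations. */
#include <stdio.h>

static long nr_slab_unreclaimable;

static void account_alloc(unsigned int order) { nr_slab_unreclaimable += 1L << order; }
static void account_free(unsigned int order)  { nr_slab_unreclaimable -= 1L << order; }

int main(void)
{
	account_alloc(3);	/* e.g. a 32 KiB kmalloc on 4 KiB pages */
	printf("after alloc: %ld pages\n", nr_slab_unreclaimable);	/* 8 */
	account_free(3);
	printf("after free : %ld pages\n", nr_slab_unreclaimable);	/* 0 */
	return 0;
}
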
index cf377beab96212bc8e717eaabfac6b263b9108cf..fa53e9f738935605acf538cdcbbf76f41f410169 100644 (file)
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -190,7 +190,7 @@ static int slob_last(slob_t *s)
 
 static void *slob_new_pages(gfp_t gfp, int order, int node)
 {
-       void *page;
+       struct page *page;
 
 #ifdef CONFIG_NUMA
        if (node != NUMA_NO_NODE)
@@ -202,14 +202,21 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
        if (!page)
                return NULL;
 
+       mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE,
+                           1 << order);
        return page_address(page);
 }
 
 static void slob_free_pages(void *b, int order)
 {
+       struct page *sp = virt_to_page(b);
+
        if (current->reclaim_state)
                current->reclaim_state->reclaimed_slab += 1 << order;
-       free_pages((unsigned long)b, order);
+
+       mod_node_page_state(page_pgdat(sp), NR_SLAB_UNRECLAIMABLE,
+                           -(1 << order));
+       __free_pages(sp, order);
 }
 
 /*
@@ -217,6 +224,7 @@ static void slob_free_pages(void *b, int order)
  * @sp: Page to look in.
  * @size: Size of the allocation.
  * @align: Allocation alignment.
+ * @align_offset: Offset in the allocated block that will be aligned.
  * @page_removed_from_list: Return parameter.
  *
  * Tries to find a chunk of memory at least @size bytes big within @page.
@@ -227,7 +235,7 @@ static void slob_free_pages(void *b, int order)
  *         true (set to false otherwise).
  */
 static void *slob_page_alloc(struct page *sp, size_t size, int align,
-                            bool *page_removed_from_list)
+                             int align_offset, bool *page_removed_from_list)
 {
        slob_t *prev, *cur, *aligned = NULL;
        int delta = 0, units = SLOB_UNITS(size);
@@ -236,8 +244,17 @@ static void *slob_page_alloc(struct page *sp, size_t size, int align,
        for (prev = NULL, cur = sp->freelist; ; prev = cur, cur = slob_next(cur)) {
                slobidx_t avail = slob_units(cur);
 
+               /*
+                * 'aligned' will hold the address of the slob block so that the
+                * address 'aligned'+'align_offset' is aligned according to the
+                * 'align' parameter. This is for kmalloc() which prepends the
+                * allocated block with its size, so that the block itself is
+                * aligned when needed.
+                */
                if (align) {
-                       aligned = (slob_t *)ALIGN((unsigned long)cur, align);
+                       aligned = (slob_t *)
+                               (ALIGN((unsigned long)cur + align_offset, align)
+                                - align_offset);
                        delta = aligned - cur;
                }
                if (avail >= units + delta) { /* room enough? */
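
The new align_offset argument exists because SLOB's kmalloc() prepends a size header of minalign bytes: what has to land on the requested boundary is the payload handed back to the caller, not the header, so the block address is computed as ALIGN(cur + align_offset, align) - align_offset. A tiny worked example with invented addresses:

/* Worked example of the align_offset arithmetic in slob_page_alloc(). */
#include <stdio.h>

#define ALIGN(x, a)	(((x) + (a) - 1) & ~((unsigned long)(a) - 1))

int main(void)
{
	unsigned long cur = 0x1008;	/* hypothetical free block address  */
	unsigned long align = 64;	/* caller wants 64-byte alignment   */
	unsigned long off = 8;		/* size header prepended by kmalloc */

	unsigned long block = ALIGN(cur + off, align) - off;
	unsigned long payload = block + off;

	printf("block %#lx, payload %#lx (payload %% align = %lu)\n",
	       block, payload, payload % align);
	return 0;
}
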
@@ -281,7 +298,8 @@ static void *slob_page_alloc(struct page *sp, size_t size, int align,
 /*
  * slob_alloc: entry point into the slob allocator.
  */
-static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
+static void *slob_alloc(size_t size, gfp_t gfp, int align, int node,
+                                                       int align_offset)
 {
        struct page *sp;
        struct list_head *slob_list;
@@ -312,7 +330,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
                if (sp->units < SLOB_UNITS(size))
                        continue;
 
-               b = slob_page_alloc(sp, size, align, &page_removed_from_list);
+               b = slob_page_alloc(sp, size, align, align_offset, &page_removed_from_list);
                if (!b)
                        continue;
 
@@ -349,7 +367,7 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
                INIT_LIST_HEAD(&sp->slab_list);
                set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
                set_slob_page_free(sp, slob_list);
-               b = slob_page_alloc(sp, size, align, &_unused);
+               b = slob_page_alloc(sp, size, align, align_offset, &_unused);
                BUG_ON(!b);
                spin_unlock_irqrestore(&slob_lock, flags);
        }
@@ -451,7 +469,7 @@ static __always_inline void *
 __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
 {
        unsigned int *m;
-       int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
+       int minalign = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
        void *ret;
 
        gfp &= gfp_allowed_mask;
@@ -459,19 +477,28 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
        fs_reclaim_acquire(gfp);
        fs_reclaim_release(gfp);
 
-       if (size < PAGE_SIZE - align) {
+       if (size < PAGE_SIZE - minalign) {
+               int align = minalign;
+
+               /*
+                * For power of two sizes, guarantee natural alignment for
+                * kmalloc()'d objects.
+                */
+               if (is_power_of_2(size))
+                       align = max(minalign, (int) size);
+
                if (!size)
                        return ZERO_SIZE_PTR;
 
-               m = slob_alloc(size + align, gfp, align, node);
+               m = slob_alloc(size + minalign, gfp, align, node, minalign);
 
                if (!m)
                        return NULL;
                *m = size;
-               ret = (void *)m + align;
+               ret = (void *)m + minalign;
 
                trace_kmalloc_node(caller, ret,
-                                  size, size + align, gfp, node);
+                                  size, size + minalign, gfp, node);
        } else {
                unsigned int order = get_order(size);
 
@@ -521,8 +548,13 @@ void kfree(const void *block)
                int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
                unsigned int *m = (unsigned int *)(block - align);
                slob_free(m, *m + align);
-       } else
-               __free_pages(sp, compound_order(sp));
+       } else {
+               unsigned int order = compound_order(sp);
+               mod_node_page_state(page_pgdat(sp), NR_SLAB_UNRECLAIMABLE,
+                                   -(1 << order));
+               __free_pages(sp, order);
+
+       }
 }
 EXPORT_SYMBOL(kfree);
 
@@ -567,7 +599,7 @@ static void *slob_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
        fs_reclaim_release(flags);
 
        if (c->size < PAGE_SIZE) {
-               b = slob_alloc(c->size, flags, c->align, node);
+               b = slob_alloc(c->size, flags, c->align, node, 0);
                trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
                                            SLOB_UNITS(c->size) * SLOB_UNIT,
                                            flags, node);
index 42c1b3af3c9805fd6ae0e7028aa614fac979f433..b25c807a111f43fa77cd1ab3b10218f9323627e2 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2671,6 +2671,17 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
        return p;
 }
 
+/*
+ * If the object has been wiped upon free, make sure it's fully initialized by
+ * zeroing out freelist pointer.
+ */
+static __always_inline void maybe_wipe_obj_freeptr(struct kmem_cache *s,
+                                                  void *obj)
+{
+       if (unlikely(slab_want_init_on_free(s)) && obj)
+               memset((void *)((char *)obj + s->offset), 0, sizeof(void *));
+}
+
 /*
  * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc)
  * have the fastpath folded into their functions. So no function call
@@ -2759,12 +2770,8 @@ redo:
                prefetch_freepointer(s, next_object);
                stat(s, ALLOC_FASTPATH);
        }
-       /*
-        * If the object has been wiped upon free, make sure it's fully
-        * initialized by zeroing out freelist pointer.
-        */
-       if (unlikely(slab_want_init_on_free(s)) && object)
-               memset(object + s->offset, 0, sizeof(void *));
+
+       maybe_wipe_obj_freeptr(s, object);
 
        if (unlikely(slab_want_init_on_alloc(gfpflags, s)) && object)
                memset(object, 0, s->object_size);
@@ -3178,10 +3185,13 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
                                goto error;
 
                        c = this_cpu_ptr(s->cpu_slab);
+                       maybe_wipe_obj_freeptr(s, p[i]);
+
                        continue; /* goto for-loop */
                }
                c->freelist = get_freepointer(s, object);
                p[i] = object;
+               maybe_wipe_obj_freeptr(s, p[i]);
        }
        c->tid = next_tid(c->tid);
        local_irq_enable();
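
init_on_free zeroes an object when it is freed, but SLUB then stores its freelist pointer inside the freed object at s->offset, so the allocation paths (now including the bulk path above) must re-zero exactly that word before handing the object out. A rough user-space model of the idea; the layout and offset below are illustrative, not SLUB's real metadata:

/* Rough model of an embedded freelist pointer and maybe_wipe_obj_freeptr(). */
#include <stdio.h>
#include <string.h>

#define OBJ_SIZE	64
#define FREEPTR_OFFSET	0	/* assume the freelist pointer lives at offset 0 */

static void wipe_obj_freeptr(unsigned char *obj)
{
	/* Zero only the word that used to hold the freelist pointer. */
	memset(obj + FREEPTR_OFFSET, 0, sizeof(void *));
}

int main(void)
{
	unsigned char obj[OBJ_SIZE];
	unsigned char next_free[OBJ_SIZE];
	void *next = next_free;
	void *after = NULL;

	/* While the object sat on the freelist, this word pointed at the next
	 * free object -- exactly the stale data a caller must never observe
	 * after an init_on_free allocation. */
	memset(obj, 0, sizeof(obj));
	memcpy(obj + FREEPTR_OFFSET, &next, sizeof(next));

	wipe_obj_freeptr(obj);

	memcpy(&after, obj + FREEPTR_OFFSET, sizeof(after));
	printf("first word after wipe: %p\n", after);	/* prints a null pointer */
	return 0;
}
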
@@ -3821,11 +3831,15 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
 {
        struct page *page;
        void *ptr = NULL;
+       unsigned int order = get_order(size);
 
        flags |= __GFP_COMP;
-       page = alloc_pages_node(node, flags, get_order(size));
-       if (page)
+       page = alloc_pages_node(node, flags, order);
+       if (page) {
                ptr = page_address(page);
+               mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE,
+                                   1 << order);
+       }
 
        return kmalloc_large_node_hook(ptr, size, flags);
 }
@@ -3951,9 +3965,13 @@ void kfree(const void *x)
 
        page = virt_to_head_page(x);
        if (unlikely(!PageSlab(page))) {
+               unsigned int order = compound_order(page);
+
                BUG_ON(!PageCompound(page));
                kfree_hook(object);
-               __free_pages(page, compound_order(page));
+               mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE,
+                                   -(1 << order));
+               __free_pages(page, order);
                return;
        }
        slab_free(page->slab_cache, page, object, NULL, 1, _RET_IP_);
@@ -4838,7 +4856,17 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
                }
        }
 
-       get_online_mems();
+       /*
+        * It is impossible to take "mem_hotplug_lock" here with "kernfs_mutex"
+        * already held which will conflict with an existing lock order:
+        *
+        * mem_hotplug_lock->slab_mutex->kernfs_mutex
+        *
+        * We don't really need mem_hotplug_lock (to hold off
+        * slab_mem_going_offline_callback) here because slab's memory hot
+        * unplug code doesn't destroy the kmem_cache->node[] data.
+        */
+
 #ifdef CONFIG_SLUB_DEBUG
        if (flags & SO_ALL) {
                struct kmem_cache_node *n;
@@ -4879,7 +4907,6 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
                        x += sprintf(buf + x, " N%d=%lu",
                                        node, nodes[node]);
 #endif
-       put_online_mems();
        kfree(nodes);
        return x + sprintf(buf + x, "\n");
 }
index bf32de9e666b5697f74cdbe319d4c783237065ab..f6891c1992b181cfd2c48b9b84dcd5c0ee1244ae 100644 (file)
@@ -219,7 +219,7 @@ static inline unsigned long first_present_section_nr(void)
        return next_present_section_nr(-1);
 }
 
-void subsection_mask_set(unsigned long *map, unsigned long pfn,
+static void subsection_mask_set(unsigned long *map, unsigned long pfn,
                unsigned long nr_pages)
 {
        int idx = subsection_map_index(pfn);
index 8563339041f68c4a602ab364f913636071ccda70..dd9ebc1da35664d04dd508539b34fb96dc764f28 100644 (file)
@@ -592,6 +592,16 @@ unsigned long invalidate_mapping_pages(struct address_space *mapping,
                                        unlock_page(page);
                                        continue;
                                }
+
+                               /* Take a pin outside pagevec */
+                               get_page(page);
+
+                               /*
+                                * Drop extra pins before trying to invalidate
+                                * the huge page.
+                                */
+                               pagevec_remove_exceptionals(&pvec);
+                               pagevec_release(&pvec);
                        }
 
                        ret = invalidate_inode_page(page);
@@ -602,6 +612,8 @@ unsigned long invalidate_mapping_pages(struct address_space *mapping,
                         */
                        if (!ret)
                                deactivate_file_page(page);
+                       if (PageTransHuge(page))
+                               put_page(page);
                        count += ret;
                }
                pagevec_remove_exceptionals(&pvec);
index f3b50811497ad6506ca3eb7345208e1d52ce2679..4bac22fe1aa214c32bba6f7d5f46f82b5ea208ba 100644 (file)
@@ -355,6 +355,9 @@ void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg, int prio)
  * "hierarchy" or "local").
  *
  * To be used as memcg event method.
+ *
+ * Return: 0 on success, -ENOMEM on memory failure or -EINVAL if @args could
+ * not be parsed.
  */
 int vmpressure_register_event(struct mem_cgroup *memcg,
                              struct eventfd_ctx *eventfd, const char *args)
@@ -362,7 +365,7 @@ int vmpressure_register_event(struct mem_cgroup *memcg,
        struct vmpressure *vmpr = memcg_to_vmpressure(memcg);
        struct vmpressure_event *ev;
        enum vmpressure_modes mode = VMPRESSURE_NO_PASSTHROUGH;
-       enum vmpressure_levels level = -1;
+       enum vmpressure_levels level;
        char *spec, *spec_orig;
        char *token;
        int ret = 0;
@@ -375,20 +378,18 @@ int vmpressure_register_event(struct mem_cgroup *memcg,
 
        /* Find required level */
        token = strsep(&spec, ",");
-       level = match_string(vmpressure_str_levels, VMPRESSURE_NUM_LEVELS, token);
-       if (level < 0) {
-               ret = level;
+       ret = match_string(vmpressure_str_levels, VMPRESSURE_NUM_LEVELS, token);
+       if (ret < 0)
                goto out;
-       }
+       level = ret;
 
        /* Find optional mode */
        token = strsep(&spec, ",");
        if (token) {
-               mode = match_string(vmpressure_str_modes, VMPRESSURE_NUM_MODES, token);
-               if (mode < 0) {
-                       ret = mode;
+               ret = match_string(vmpressure_str_modes, VMPRESSURE_NUM_MODES, token);
+               if (ret < 0)
                        goto out;
-               }
+               mode = ret;
        }
 
        ev = kzalloc(sizeof(*ev), GFP_KERNEL);
@@ -404,6 +405,7 @@ int vmpressure_register_event(struct mem_cgroup *memcg,
        mutex_lock(&vmpr->events_lock);
        list_add(&ev->node, &vmpr->events);
        mutex_unlock(&vmpr->events_lock);
+       ret = 0;
 out:
        kfree(spec_orig);
        return ret;
index e5d52d6a24aff1c7fccd292bb8a2455e1eec1d52..ee4eecc7e1c2177041d00d8eb8d62ca64647206a 100644 (file)
@@ -351,12 +351,13 @@ unsigned long zone_reclaimable_pages(struct zone *zone)
  */
 unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone_idx)
 {
-       unsigned long lru_size;
+       unsigned long lru_size = 0;
        int zid;
 
-       if (!mem_cgroup_disabled())
-               lru_size = lruvec_page_state_local(lruvec, NR_LRU_BASE + lru);
-       else
+       if (!mem_cgroup_disabled()) {
+               for (zid = 0; zid < MAX_NR_ZONES; zid++)
+                       lru_size += mem_cgroup_get_zone_lru_size(lruvec, lru, zid);
+       } else
                lru_size = node_page_state(lruvec_pgdat(lruvec), NR_LRU_BASE + lru);
 
        for (zid = zone_idx + 1; zid < MAX_NR_ZONES; zid++) {
@@ -932,10 +933,7 @@ static int __remove_mapping(struct address_space *mapping, struct page *page,
         * Note that if SetPageDirty is always performed via set_page_dirty,
         * and thus under the i_pages lock, then this ordering is not required.
         */
-       if (unlikely(PageTransHuge(page)) && PageSwapCache(page))
-               refcount = 1 + HPAGE_PMD_NR;
-       else
-               refcount = 2;
+       refcount = 1 + compound_nr(page);
        if (!page_ref_freeze(page, refcount))
                goto cannot_free;
        /* note: atomic_cmpxchg in page_ref_freeze provides the smp_rmb */
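
The refcount the page must be frozen at is one reference per small page held by the cache plus the caller's reference; 1 + compound_nr(page) expresses that uniformly for order-0 pages and compound pages alike. A quick worked check, assuming the common 512 pages per THP:

/* Worked check of the expected refcount in __remove_mapping(). */
#include <stdio.h>

static unsigned long expected_refcount(unsigned long compound_nr)
{
	return 1 + compound_nr;		/* caller's ref + one per cached page */
}

int main(void)
{
	printf("order-0 page : %lu\n", expected_refcount(1));	/* 2   */
	printf("THP in cache : %lu\n", expected_refcount(512));	/* 513 */
	return 0;
}
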
@@ -2459,17 +2457,70 @@ out:
        *lru_pages = 0;
        for_each_evictable_lru(lru) {
                int file = is_file_lru(lru);
-               unsigned long size;
+               unsigned long lruvec_size;
                unsigned long scan;
+               unsigned long protection;
+
+               lruvec_size = lruvec_lru_size(lruvec, lru, sc->reclaim_idx);
+               protection = mem_cgroup_protection(memcg,
+                                                  sc->memcg_low_reclaim);
+
+               if (protection) {
+                       /*
+                        * Scale a cgroup's reclaim pressure by proportioning
+                        * its current usage to its memory.low or memory.min
+                        * setting.
+                        *
+                        * This is important, as otherwise scanning aggression
+                        * becomes extremely binary -- from nothing as we
+                        * approach the memory protection threshold, to totally
+                        * nominal as we exceed it.  This results in requiring
+                        * setting extremely liberal protection thresholds. It
+                        * also means we simply get no protection at all if we
+                        * set it too low, which is not ideal.
+                        *
+                        * If there is any protection in place, we reduce scan
+                        * pressure by how much of the total memory used is
+                        * within protection thresholds.
+                        *
+                        * There is one special case: in the first reclaim pass,
+                        * we skip over all groups that are within their low
+                        * protection. If that fails to reclaim enough pages to
+                        * satisfy the reclaim goal, we come back and override
+                        * the best-effort low protection. However, we still
+                        * ideally want to honor how well-behaved groups are in
+                        * that case instead of simply punishing them all
+                        * equally. As such, we reclaim them based on how much
+                        * memory they are using, reducing the scan pressure
+                        * again by how much of the total memory used is under
+                        * hard protection.
+                        */
+                       unsigned long cgroup_size = mem_cgroup_size(memcg);
+
+                       /* Avoid TOCTOU with earlier protection check */
+                       cgroup_size = max(cgroup_size, protection);
+
+                       scan = lruvec_size - lruvec_size * protection /
+                               cgroup_size;
+
+                       /*
+                        * Minimally target SWAP_CLUSTER_MAX pages to keep
+                        * reclaim moving forwards, avoiding decrementing
+                        * sc->priority further than desirable.
+                        */
+                       scan = max(scan, SWAP_CLUSTER_MAX);
+               } else {
+                       scan = lruvec_size;
+               }
+
+               scan >>= sc->priority;
 
-               size = lruvec_lru_size(lruvec, lru, sc->reclaim_idx);
-               scan = size >> sc->priority;
                /*
                 * If the cgroup's already been deleted, make sure to
                 * scrape out the remaining cache.
                 */
                if (!scan && !mem_cgroup_online(memcg))
-                       scan = min(size, SWAP_CLUSTER_MAX);
+                       scan = min(lruvec_size, SWAP_CLUSTER_MAX);
 
                switch (scan_balance) {
                case SCAN_EQUAL:
@@ -2489,7 +2540,7 @@ out:
                case SCAN_ANON:
                        /* Scan one type exclusively */
                        if ((scan_balance == SCAN_FILE) != file) {
-                               size = 0;
+                               lruvec_size = 0;
                                scan = 0;
                        }
                        break;
@@ -2498,7 +2549,7 @@ out:
                        BUG();
                }
 
-               *lru_pages += size;
+               *lru_pages += lruvec_size;
                nr[lru] = scan;
        }
 }
@@ -2742,6 +2793,13 @@ static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
                                memcg_memory_event(memcg, MEMCG_LOW);
                                break;
                        case MEMCG_PROT_NONE:
+                               /*
+                                * All protection thresholds breached. We may
+                                * still choose to vary the scan pressure
+                                * applied based on by how much the cgroup in
+                                * question has exceeded its protection
+                                * thresholds (see get_scan_count).
+                                */
                                break;
                        }
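
The proportional-protection logic added to get_scan_count() above reduces the scan target by the fraction of the cgroup's usage that sits under its protection, keeps it at no less than SWAP_CLUSTER_MAX so reclaim keeps moving, and then applies the priority shift. A numeric sketch with invented sizes, assuming SWAP_CLUSTER_MAX is 32:

/* Numeric sketch of the scan-pressure scaling added to get_scan_count(). */
#include <stdio.h>

#define SWAP_CLUSTER_MAX 32UL

static unsigned long scan_target(unsigned long lruvec_size,
				 unsigned long protection,
				 unsigned long cgroup_size,
				 int priority)
{
	unsigned long scan = lruvec_size;

	if (protection) {
		if (cgroup_size < protection)	/* the TOCTOU guard */
			cgroup_size = protection;
		scan = lruvec_size - lruvec_size * protection / cgroup_size;
		if (scan < SWAP_CLUSTER_MAX)
			scan = SWAP_CLUSTER_MAX;
	}
	return scan >> priority;
}

int main(void)
{
	/* 100k-page LRU, cgroup using 120k pages with memory.low = 100k:
	 * roughly one sixth of the lruvec is exposed to scanning. */
	printf("protected  : %lu pages\n", scan_target(100000, 100000, 120000, 0));
	/* No protection configured: the whole lruvec is eligible. */
	printf("unprotected: %lu pages\n", scan_target(100000, 0, 120000, 0));
	return 0;
}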
 
index 6afc892a148a0408a901695d42529a2a1be99939..a8222041bd44d648a4c8b5a1e5e345314efb050e 100644 (file)
@@ -1383,12 +1383,29 @@ static void pagetypeinfo_showfree_print(struct seq_file *m,
                        unsigned long freecount = 0;
                        struct free_area *area;
                        struct list_head *curr;
+                       bool overflow = false;
 
                        area = &(zone->free_area[order]);
 
-                       list_for_each(curr, &area->free_list[mtype])
-                               freecount++;
-                       seq_printf(m, "%6lu ", freecount);
+                       list_for_each(curr, &area->free_list[mtype]) {
+                               /*
+                                * Cap the free_list iteration because it might
+                                * be really large and we are under a spinlock
+                                * so a long time spent here could trigger a
+                                * hard lockup detector. Anyway this is a
+                                * debugging tool so knowing there is a handful
+                                * of pages of this order should be more than
+                                * sufficient.
+                                */
+                               if (++freecount >= 100000) {
+                                       overflow = true;
+                                       break;
+                               }
+                       }
+                       seq_printf(m, "%s%6lu ", overflow ? ">" : "", freecount);
+                       spin_unlock_irq(&zone->lock);
+                       cond_resched();
+                       spin_lock_irq(&zone->lock);
                }
                seq_putc(m, '\n');
        }
@@ -1972,7 +1989,7 @@ void __init init_mm_internals(void)
 #endif
 #ifdef CONFIG_PROC_FS
        proc_create_seq("buddyinfo", 0444, NULL, &fragmentation_op);
-       proc_create_seq("pagetypeinfo", 0444, NULL, &pagetypeinfo_op);
+       proc_create_seq("pagetypeinfo", 0400, NULL, &pagetypeinfo_op);
        proc_create_seq("vmstat", 0444, NULL, &vmstat_op);
        proc_create_seq("zoneinfo", 0444, NULL, &zoneinfo_op);
 #endif
index 05bdf90646e7937ab8435327f785afbb0deaabf1..6d3d3f698ebb9f91db74662b25fe68b0bc86650b 100644 (file)
@@ -998,9 +998,11 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
        struct z3fold_header *zhdr;
        struct page *page;
        enum buddy bud;
+       bool page_claimed;
 
        zhdr = handle_to_z3fold_header(handle);
        page = virt_to_page(zhdr);
+       page_claimed = test_and_set_bit(PAGE_CLAIMED, &page->private);
 
        if (test_bit(PAGE_HEADLESS, &page->private)) {
                /* if a headless page is under reclaim, just leave.
@@ -1008,7 +1010,7 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
                 * has not been set before, we release this page
                 * immediately so we don't care about its value any more.
                 */
-               if (!test_and_set_bit(PAGE_CLAIMED, &page->private)) {
+               if (!page_claimed) {
                        spin_lock(&pool->lock);
                        list_del(&page->lru);
                        spin_unlock(&pool->lock);
@@ -1044,13 +1046,15 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
                atomic64_dec(&pool->pages_nr);
                return;
        }
-       if (test_bit(PAGE_CLAIMED, &page->private)) {
+       if (page_claimed) {
+               /* the page has not been claimed by us */
                z3fold_page_unlock(zhdr);
                return;
        }
        if (unlikely(PageIsolated(page)) ||
            test_and_set_bit(NEEDS_COMPACTING, &page->private)) {
                z3fold_page_unlock(zhdr);
+               clear_bit(PAGE_CLAIMED, &page->private);
                return;
        }
        if (zhdr->cpu < 0 || !cpu_online(zhdr->cpu)) {
@@ -1060,10 +1064,12 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
                zhdr->cpu = -1;
                kref_get(&zhdr->refcount);
                do_compact_page(zhdr, true);
+               clear_bit(PAGE_CLAIMED, &page->private);
                return;
        }
        kref_get(&zhdr->refcount);
        queue_work_on(zhdr->cpu, pool->compact_wq, &zhdr->work);
+       clear_bit(PAGE_CLAIMED, &page->private);
        z3fold_page_unlock(zhdr);
 }
 
index 54728d2eda189d870ecb97acd0278fdca4358761..d4bcfd8f95bf6435902e10cce19fc43b47d867d9 100644 (file)
@@ -172,7 +172,6 @@ int register_vlan_dev(struct net_device *dev, struct netlink_ext_ack *extack)
        if (err < 0)
                goto out_uninit_mvrp;
 
-       vlan->nest_level = dev_get_nest_level(real_dev) + 1;
        err = register_netdevice(dev);
        if (err < 0)
                goto out_uninit_mvrp;
index 93eadf1791239fd8eb81d4724ae37c7b959e5ca9..e5bff5cc6f97562a9887195ced5c572c1951915e 100644 (file)
@@ -489,36 +489,6 @@ static void vlan_dev_set_rx_mode(struct net_device *vlan_dev)
        dev_uc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev);
 }
 
-/*
- * vlan network devices have devices nesting below it, and are a special
- * "super class" of normal network devices; split their locks off into a
- * separate class since they always nest.
- */
-static struct lock_class_key vlan_netdev_xmit_lock_key;
-static struct lock_class_key vlan_netdev_addr_lock_key;
-
-static void vlan_dev_set_lockdep_one(struct net_device *dev,
-                                    struct netdev_queue *txq,
-                                    void *_subclass)
-{
-       lockdep_set_class_and_subclass(&txq->_xmit_lock,
-                                      &vlan_netdev_xmit_lock_key,
-                                      *(int *)_subclass);
-}
-
-static void vlan_dev_set_lockdep_class(struct net_device *dev, int subclass)
-{
-       lockdep_set_class_and_subclass(&dev->addr_list_lock,
-                                      &vlan_netdev_addr_lock_key,
-                                      subclass);
-       netdev_for_each_tx_queue(dev, vlan_dev_set_lockdep_one, &subclass);
-}
-
-static int vlan_dev_get_lock_subclass(struct net_device *dev)
-{
-       return vlan_dev_priv(dev)->nest_level;
-}
-
 static const struct header_ops vlan_header_ops = {
        .create  = vlan_dev_hard_header,
        .parse   = eth_header_parse,
@@ -609,8 +579,6 @@ static int vlan_dev_init(struct net_device *dev)
 
        SET_NETDEV_DEVTYPE(dev, &vlan_type);
 
-       vlan_dev_set_lockdep_class(dev, vlan_dev_get_lock_subclass(dev));
-
        vlan->vlan_pcpu_stats = netdev_alloc_pcpu_stats(struct vlan_pcpu_stats);
        if (!vlan->vlan_pcpu_stats)
                return -ENOMEM;
@@ -812,7 +780,6 @@ static const struct net_device_ops vlan_netdev_ops = {
        .ndo_netpoll_cleanup    = vlan_dev_netpoll_cleanup,
 #endif
        .ndo_fix_features       = vlan_dev_fix_features,
-       .ndo_get_lock_subclass  = vlan_dev_get_lock_subclass,
        .ndo_get_iflink         = vlan_dev_get_iflink,
 };
 
index b7528e77997c88b7cf15f0b48412e3abef0620e3..0ce530af534ddfed26a32b1b24f202ee7c582198 100644 (file)
@@ -668,7 +668,7 @@ __poll_t vcc_poll(struct file *file, struct socket *sock, poll_table *wait)
                mask |= EPOLLHUP;
 
        /* readable? */
-       if (!skb_queue_empty(&sk->sk_receive_queue))
+       if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
                mask |= EPOLLIN | EPOLLRDNORM;
 
        /* writable? */
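
The poll handlers in this series (vcc_poll() here, and the bluetooth, caif and datagram ones below) move to skb_queue_empty_lockless(), which is safe to call without holding sk_receive_queue.lock. A minimal sketch of the idea behind that helper, assuming the real definition in include/linux/skbuff.h is a single READ_ONCE() load of the list head:

/* Sketch: lockless emptiness test on a circular sk_buff queue. One
 * READ_ONCE() of the head pointer, so poll() can peek at the receive
 * queue while another CPU enqueues or dequeues concurrently.
 */
static inline bool queue_empty_lockless(const struct sk_buff_head *list)
{
        return READ_ONCE(list->next) == (const struct sk_buff *)list;
}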
index d78938e3e0085d9cff138b805148a0e77de3f654..5b0b20e6da956b4333b118bce1c09c5acef6d66f 100644 (file)
@@ -22,6 +22,8 @@
 #include <linux/kernel.h>
 #include <linux/kref.h>
 #include <linux/list.h>
+#include <linux/lockdep.h>
+#include <linux/mutex.h>
 #include <linux/netdevice.h>
 #include <linux/netlink.h>
 #include <linux/pkt_sched.h>
@@ -193,14 +195,18 @@ static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
        unsigned char *ogm_buff;
        u32 random_seqno;
 
+       mutex_lock(&hard_iface->bat_iv.ogm_buff_mutex);
+
        /* randomize initial seqno to avoid collision */
        get_random_bytes(&random_seqno, sizeof(random_seqno));
        atomic_set(&hard_iface->bat_iv.ogm_seqno, random_seqno);
 
        hard_iface->bat_iv.ogm_buff_len = BATADV_OGM_HLEN;
        ogm_buff = kmalloc(hard_iface->bat_iv.ogm_buff_len, GFP_ATOMIC);
-       if (!ogm_buff)
+       if (!ogm_buff) {
+               mutex_unlock(&hard_iface->bat_iv.ogm_buff_mutex);
                return -ENOMEM;
+       }
 
        hard_iface->bat_iv.ogm_buff = ogm_buff;
 
@@ -212,35 +218,59 @@ static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
        batadv_ogm_packet->reserved = 0;
        batadv_ogm_packet->tq = BATADV_TQ_MAX_VALUE;
 
+       mutex_unlock(&hard_iface->bat_iv.ogm_buff_mutex);
+
        return 0;
 }
 
 static void batadv_iv_ogm_iface_disable(struct batadv_hard_iface *hard_iface)
 {
+       mutex_lock(&hard_iface->bat_iv.ogm_buff_mutex);
+
        kfree(hard_iface->bat_iv.ogm_buff);
        hard_iface->bat_iv.ogm_buff = NULL;
+
+       mutex_unlock(&hard_iface->bat_iv.ogm_buff_mutex);
 }
 
 static void batadv_iv_ogm_iface_update_mac(struct batadv_hard_iface *hard_iface)
 {
        struct batadv_ogm_packet *batadv_ogm_packet;
-       unsigned char *ogm_buff = hard_iface->bat_iv.ogm_buff;
+       void *ogm_buff;
 
-       batadv_ogm_packet = (struct batadv_ogm_packet *)ogm_buff;
+       mutex_lock(&hard_iface->bat_iv.ogm_buff_mutex);
+
+       ogm_buff = hard_iface->bat_iv.ogm_buff;
+       if (!ogm_buff)
+               goto unlock;
+
+       batadv_ogm_packet = ogm_buff;
        ether_addr_copy(batadv_ogm_packet->orig,
                        hard_iface->net_dev->dev_addr);
        ether_addr_copy(batadv_ogm_packet->prev_sender,
                        hard_iface->net_dev->dev_addr);
+
+unlock:
+       mutex_unlock(&hard_iface->bat_iv.ogm_buff_mutex);
 }
 
 static void
 batadv_iv_ogm_primary_iface_set(struct batadv_hard_iface *hard_iface)
 {
        struct batadv_ogm_packet *batadv_ogm_packet;
-       unsigned char *ogm_buff = hard_iface->bat_iv.ogm_buff;
+       void *ogm_buff;
 
-       batadv_ogm_packet = (struct batadv_ogm_packet *)ogm_buff;
+       mutex_lock(&hard_iface->bat_iv.ogm_buff_mutex);
+
+       ogm_buff = hard_iface->bat_iv.ogm_buff;
+       if (!ogm_buff)
+               goto unlock;
+
+       batadv_ogm_packet = ogm_buff;
        batadv_ogm_packet->ttl = BATADV_TTL;
+
+unlock:
+       mutex_unlock(&hard_iface->bat_iv.ogm_buff_mutex);
 }
 
 /* when do we schedule our own ogm to be sent */
@@ -742,7 +772,11 @@ batadv_iv_ogm_slide_own_bcast_window(struct batadv_hard_iface *hard_iface)
        }
 }
 
-static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
+/**
+ * batadv_iv_ogm_schedule_buff() - schedule submission of hardif ogm buffer
+ * @hard_iface: interface whose ogm buffer should be transmitted
+ */
+static void batadv_iv_ogm_schedule_buff(struct batadv_hard_iface *hard_iface)
 {
        struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
        unsigned char **ogm_buff = &hard_iface->bat_iv.ogm_buff;
@@ -753,9 +787,7 @@ static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
        u16 tvlv_len = 0;
        unsigned long send_time;
 
-       if (hard_iface->if_status == BATADV_IF_NOT_IN_USE ||
-           hard_iface->if_status == BATADV_IF_TO_BE_REMOVED)
-               return;
+       lockdep_assert_held(&hard_iface->bat_iv.ogm_buff_mutex);
 
        /* the interface gets activated here to avoid race conditions between
         * the moment of activating the interface in
@@ -823,6 +855,17 @@ out:
                batadv_hardif_put(primary_if);
 }
 
+static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
+{
+       if (hard_iface->if_status == BATADV_IF_NOT_IN_USE ||
+           hard_iface->if_status == BATADV_IF_TO_BE_REMOVED)
+               return;
+
+       mutex_lock(&hard_iface->bat_iv.ogm_buff_mutex);
+       batadv_iv_ogm_schedule_buff(hard_iface);
+       mutex_unlock(&hard_iface->bat_iv.ogm_buff_mutex);
+}
+
 /**
  * batadv_iv_orig_ifinfo_sum() - Get bcast_own sum for originator over interface
  * @orig_node: originator which rebroadcasted the OGMs directly
index dc4f7430cb5a6528ac4635c943ab4d65db1a538b..8033f24f506cae2d734aea5296cb5bd3066e7277 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/kref.h>
 #include <linux/list.h>
 #include <linux/lockdep.h>
+#include <linux/mutex.h>
 #include <linux/netdevice.h>
 #include <linux/random.h>
 #include <linux/rculist.h>
@@ -256,14 +257,12 @@ static void batadv_v_ogm_queue_on_if(struct sk_buff *skb,
 }
 
 /**
- * batadv_v_ogm_send() - periodic worker broadcasting the own OGM
- * @work: work queue item
+ * batadv_v_ogm_send_softif() - periodic worker broadcasting the own OGM
+ * @bat_priv: the bat priv with all the soft interface information
  */
-static void batadv_v_ogm_send(struct work_struct *work)
+static void batadv_v_ogm_send_softif(struct batadv_priv *bat_priv)
 {
        struct batadv_hard_iface *hard_iface;
-       struct batadv_priv_bat_v *bat_v;
-       struct batadv_priv *bat_priv;
        struct batadv_ogm2_packet *ogm_packet;
        struct sk_buff *skb, *skb_tmp;
        unsigned char *ogm_buff;
@@ -271,8 +270,7 @@ static void batadv_v_ogm_send(struct work_struct *work)
        u16 tvlv_len = 0;
        int ret;
 
-       bat_v = container_of(work, struct batadv_priv_bat_v, ogm_wq.work);
-       bat_priv = container_of(bat_v, struct batadv_priv, bat_v);
+       lockdep_assert_held(&bat_priv->bat_v.ogm_buff_mutex);
 
        if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING)
                goto out;
@@ -363,6 +361,23 @@ out:
        return;
 }
 
+/**
+ * batadv_v_ogm_send() - periodic worker broadcasting the own OGM
+ * @work: work queue item
+ */
+static void batadv_v_ogm_send(struct work_struct *work)
+{
+       struct batadv_priv_bat_v *bat_v;
+       struct batadv_priv *bat_priv;
+
+       bat_v = container_of(work, struct batadv_priv_bat_v, ogm_wq.work);
+       bat_priv = container_of(bat_v, struct batadv_priv, bat_v);
+
+       mutex_lock(&bat_priv->bat_v.ogm_buff_mutex);
+       batadv_v_ogm_send_softif(bat_priv);
+       mutex_unlock(&bat_priv->bat_v.ogm_buff_mutex);
+}
+
 /**
  * batadv_v_ogm_aggr_work() - OGM queue periodic task per interface
  * @work: work queue item
@@ -424,11 +439,15 @@ void batadv_v_ogm_primary_iface_set(struct batadv_hard_iface *primary_iface)
        struct batadv_priv *bat_priv = netdev_priv(primary_iface->soft_iface);
        struct batadv_ogm2_packet *ogm_packet;
 
+       mutex_lock(&bat_priv->bat_v.ogm_buff_mutex);
        if (!bat_priv->bat_v.ogm_buff)
-               return;
+               goto unlock;
 
        ogm_packet = (struct batadv_ogm2_packet *)bat_priv->bat_v.ogm_buff;
        ether_addr_copy(ogm_packet->orig, primary_iface->net_dev->dev_addr);
+
+unlock:
+       mutex_unlock(&bat_priv->bat_v.ogm_buff_mutex);
 }
 
 /**
@@ -1050,6 +1069,8 @@ int batadv_v_ogm_init(struct batadv_priv *bat_priv)
        atomic_set(&bat_priv->bat_v.ogm_seqno, random_seqno);
        INIT_DELAYED_WORK(&bat_priv->bat_v.ogm_wq, batadv_v_ogm_send);
 
+       mutex_init(&bat_priv->bat_v.ogm_buff_mutex);
+
        return 0;
 }
 
@@ -1061,7 +1082,11 @@ void batadv_v_ogm_free(struct batadv_priv *bat_priv)
 {
        cancel_delayed_work_sync(&bat_priv->bat_v.ogm_wq);
 
+       mutex_lock(&bat_priv->bat_v.ogm_buff_mutex);
+
        kfree(bat_priv->bat_v.ogm_buff);
        bat_priv->bat_v.ogm_buff = NULL;
        bat_priv->bat_v.ogm_buff_len = 0;
+
+       mutex_unlock(&bat_priv->bat_v.ogm_buff_mutex);
 }
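
The batman-adv hunks above serialize every reader and writer of the OGM buffer behind the new ogm_buff_mutex, and each accessor re-reads the buffer pointer under the lock because the disable path may already have freed it. A minimal sketch of that pattern with hypothetical names (struct iface stands in for batadv_hard_iface, assuming <linux/mutex.h>):

/* Sketch: buffer owned by a mutex; accessors re-check the pointer under
 * the lock and bail out if teardown freed it in the meantime.
 */
struct iface {
        struct mutex buff_mutex;
        u8 *buff;
        size_t buff_len;
};

static void iface_update(struct iface *iface, const u8 *addr, size_t len)
{
        mutex_lock(&iface->buff_mutex);
        if (iface->buff && len <= iface->buff_len)
                memcpy(iface->buff, addr, len);
        mutex_unlock(&iface->buff_mutex);
}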
index c90e47342bb0698ae460efe7f8ca35bd313d6cce..afb52282d5bd0d495febb5b7d636830f5475d74e 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/kref.h>
 #include <linux/limits.h>
 #include <linux/list.h>
+#include <linux/mutex.h>
 #include <linux/netdevice.h>
 #include <linux/printk.h>
 #include <linux/rculist.h>
@@ -929,6 +930,7 @@ batadv_hardif_add_interface(struct net_device *net_dev)
        INIT_LIST_HEAD(&hard_iface->list);
        INIT_HLIST_HEAD(&hard_iface->neigh_list);
 
+       mutex_init(&hard_iface->bat_iv.ogm_buff_mutex);
        spin_lock_init(&hard_iface->neigh_list_lock);
        kref_init(&hard_iface->refcount);
 
index a1146cb10919e5647720f1ec6d0abf27fed652d0..5ee8e9a100f903d01fb6799828dfba6ae416ec66 100644 (file)
@@ -436,7 +436,7 @@ void batadv_interface_rx(struct net_device *soft_iface,
        /* clean the netfilter state now that the batman-adv header has been
         * removed
         */
-       nf_reset(skb);
+       nf_reset_ct(skb);
 
        if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
                goto dropped;
@@ -740,36 +740,6 @@ static int batadv_interface_kill_vid(struct net_device *dev, __be16 proto,
        return 0;
 }
 
-/* batman-adv network devices have devices nesting below it and are a special
- * "super class" of normal network devices; split their locks off into a
- * separate class since they always nest.
- */
-static struct lock_class_key batadv_netdev_xmit_lock_key;
-static struct lock_class_key batadv_netdev_addr_lock_key;
-
-/**
- * batadv_set_lockdep_class_one() - Set lockdep class for a single tx queue
- * @dev: device which owns the tx queue
- * @txq: tx queue to modify
- * @_unused: always NULL
- */
-static void batadv_set_lockdep_class_one(struct net_device *dev,
-                                        struct netdev_queue *txq,
-                                        void *_unused)
-{
-       lockdep_set_class(&txq->_xmit_lock, &batadv_netdev_xmit_lock_key);
-}
-
-/**
- * batadv_set_lockdep_class() - Set txq and addr_list lockdep class
- * @dev: network device to modify
- */
-static void batadv_set_lockdep_class(struct net_device *dev)
-{
-       lockdep_set_class(&dev->addr_list_lock, &batadv_netdev_addr_lock_key);
-       netdev_for_each_tx_queue(dev, batadv_set_lockdep_class_one, NULL);
-}
-
 /**
  * batadv_softif_init_late() - late stage initialization of soft interface
  * @dev: registered network device to modify
@@ -783,8 +753,6 @@ static int batadv_softif_init_late(struct net_device *dev)
        int ret;
        size_t cnt_len = sizeof(u64) * BATADV_CNT_NUM;
 
-       batadv_set_lockdep_class(dev);
-
        bat_priv = netdev_priv(dev);
        bat_priv->soft_iface = dev;
 
index be7c02aa91e26dcc8d0c322fa162b0dd3a5a0eb9..4d7f1baee7b7da7d32060014400e89db9c7f0b7a 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/if.h>
 #include <linux/if_ether.h>
 #include <linux/kref.h>
+#include <linux/mutex.h>
 #include <linux/netdevice.h>
 #include <linux/netlink.h>
 #include <linux/sched.h> /* for linux/wait.h */
@@ -81,6 +82,9 @@ struct batadv_hard_iface_bat_iv {
 
        /** @ogm_seqno: OGM sequence number - used to identify each OGM */
        atomic_t ogm_seqno;
+
+       /** @ogm_buff_mutex: lock protecting ogm_buff and ogm_buff_len */
+       struct mutex ogm_buff_mutex;
 };
 
 /**
@@ -1539,6 +1543,9 @@ struct batadv_priv_bat_v {
        /** @ogm_seqno: OGM sequence number - used to identify each OGM */
        atomic_t ogm_seqno;
 
+       /** @ogm_buff_mutex: lock protecting ogm_buff and ogm_buff_len */
+       struct mutex ogm_buff_mutex;
+
        /** @ogm_wq: workqueue used to schedule OGM transmissions */
        struct delayed_work ogm_wq;
 };
index bb55d92691b068d32b281b1b3ceff58b8ac51fc4..4febc82a7c7613244125696cfd464099e10991dc 100644 (file)
@@ -571,15 +571,7 @@ static netdev_tx_t bt_xmit(struct sk_buff *skb, struct net_device *netdev)
        return err < 0 ? NET_XMIT_DROP : err;
 }
 
-static int bt_dev_init(struct net_device *dev)
-{
-       netdev_lockdep_set_classes(dev);
-
-       return 0;
-}
-
 static const struct net_device_ops netdev_ops = {
-       .ndo_init               = bt_dev_init,
        .ndo_start_xmit         = bt_xmit,
 };
 
index 94ddf19998c7ee7098b52c9ef37f3e0296089fab..5f508c50649d0abc317ed83cc0768f6b7a64de96 100644 (file)
@@ -460,7 +460,7 @@ __poll_t bt_sock_poll(struct file *file, struct socket *sock,
        if (sk->sk_state == BT_LISTEN)
                return bt_accept_poll(sk);
 
-       if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
+       if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue))
                mask |= EPOLLERR |
                        (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);
 
@@ -470,7 +470,7 @@ __poll_t bt_sock_poll(struct file *file, struct socket *sock,
        if (sk->sk_shutdown == SHUTDOWN_MASK)
                mask |= EPOLLHUP;
 
-       if (!skb_queue_empty(&sk->sk_receive_queue))
+       if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
                mask |= EPOLLIN | EPOLLRDNORM;
 
        if (sk->sk_state == BT_CLOSED)
index 681b72862c165558e756afacda786998a0e9f185..e804a3016902b741205366b5e9876a094f9f5856 100644 (file)
@@ -24,8 +24,6 @@
 const struct nf_br_ops __rcu *nf_br_ops __read_mostly;
 EXPORT_SYMBOL_GPL(nf_br_ops);
 
-static struct lock_class_key bridge_netdev_addr_lock_key;
-
 /* net device transmit always called with BH disabled */
 netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
 {
@@ -108,11 +106,6 @@ out:
        return NETDEV_TX_OK;
 }
 
-static void br_set_lockdep_class(struct net_device *dev)
-{
-       lockdep_set_class(&dev->addr_list_lock, &bridge_netdev_addr_lock_key);
-}
-
 static int br_dev_init(struct net_device *dev)
 {
        struct net_bridge *br = netdev_priv(dev);
@@ -150,7 +143,6 @@ static int br_dev_init(struct net_device *dev)
                br_mdb_hash_fini(br);
                br_fdb_hash_fini(br);
        }
-       br_set_lockdep_class(dev);
 
        return err;
 }
index 8842798c29e636ac2dbfe01f0bf2ba2f2d398c44..8096732223828a7110ab35d715fe1894ddf57a91 100644 (file)
@@ -33,6 +33,7 @@ static int nf_br_ip_fragment(struct net *net, struct sock *sk,
 {
        int frag_max_size = BR_INPUT_SKB_CB(skb)->frag_max_size;
        unsigned int hlen, ll_rs, mtu;
+       ktime_t tstamp = skb->tstamp;
        struct ip_frag_state state;
        struct iphdr *iph;
        int err;
@@ -80,6 +81,7 @@ static int nf_br_ip_fragment(struct net *net, struct sock *sk,
                        if (iter.frag)
                                ip_fraglist_prepare(skb, &iter);
 
+                       skb->tstamp = tstamp;
                        err = output(net, sk, data, skb);
                        if (err || !iter.frag)
                                break;
@@ -93,7 +95,7 @@ slow_path:
         * This may also be a clone skbuff, we could preserve the geometry for
         * the copies but probably not worth the effort.
         */
-       ip_frag_init(skb, hlen, ll_rs, frag_max_size, &state);
+       ip_frag_init(skb, hlen, ll_rs, frag_max_size, false, &state);
 
        while (state.left > 0) {
                struct sk_buff *skb2;
@@ -104,6 +106,7 @@ slow_path:
                        goto blackhole;
                }
 
+               skb2->tstamp = tstamp;
                err = output(net, sk, data, skb2);
                if (err)
                        goto blackhole;
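
nf_br_ip_fragment() now snapshots skb->tstamp before fragmenting and copies it onto each fragment in both the fraglist and slow-copy paths, so the original timestamp is not lost. A minimal sketch of the save-and-propagate pattern; next_fragment() and emit_fragment() are hypothetical callbacks, not kernel APIs:

/* Sketch: preserve a per-packet timestamp across fragmentation by saving
 * it once and re-applying it to every fragment before it is emitted.
 */
static int fragment_preserving_tstamp(struct sk_buff *skb,
                                      struct sk_buff *(*next_fragment)(struct sk_buff *),
                                      int (*emit_fragment)(struct sk_buff *))
{
        ktime_t tstamp = skb->tstamp;           /* saved before fragmenting */
        struct sk_buff *frag;
        int err = 0;

        while ((frag = next_fragment(skb)) != NULL) {
                frag->tstamp = tstamp;          /* re-applied to each fragment */
                err = emit_fragment(frag);
                if (err)
                        break;
        }
        return err;
}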
index 13ea920600aeb66af017134cbe5fcfd5ccc6020e..ef14da50a9819183c538f14b97b5e4986adafb45 100644 (file)
@@ -953,7 +953,7 @@ static __poll_t caif_poll(struct file *file,
                mask |= EPOLLRDHUP;
 
        /* readable? */
-       if (!skb_queue_empty(&sk->sk_receive_queue) ||
+       if (!skb_queue_empty_lockless(&sk->sk_receive_queue) ||
                (sk->sk_shutdown & RCV_SHUTDOWN))
                mask |= EPOLLIN | EPOLLRDNORM;
 
index 4cc8dc5db2b73471ae3a15fda753912d5e869624..da3c24ed129cd64db8bdb1916afa552a47c1a5a3 100644 (file)
@@ -97,7 +97,7 @@ int __skb_wait_for_more_packets(struct sock *sk, int *err, long *timeo_p,
        if (error)
                goto out_err;
 
-       if (sk->sk_receive_queue.prev != skb)
+       if (READ_ONCE(sk->sk_receive_queue.prev) != skb)
                goto out;
 
        /* Socket shut down? */
@@ -278,7 +278,7 @@ struct sk_buff *__skb_try_recv_datagram(struct sock *sk, unsigned int flags,
                        break;
 
                sk_busy_loop(sk, flags & MSG_DONTWAIT);
-       } while (sk->sk_receive_queue.prev != *last);
+       } while (READ_ONCE(sk->sk_receive_queue.prev) != *last);
 
        error = -EAGAIN;
 
@@ -640,7 +640,7 @@ int __zerocopy_sg_from_iter(struct sock *sk, struct sk_buff *skb,
                skb->len += copied;
                skb->truesize += truesize;
                if (sk && sk->sk_type == SOCK_STREAM) {
-                       sk->sk_wmem_queued += truesize;
+                       sk_wmem_queued_add(sk, truesize);
                        sk_mem_charge(sk, truesize);
                } else {
                        refcount_add(truesize, &skb->sk->sk_wmem_alloc);
@@ -767,7 +767,7 @@ __poll_t datagram_poll(struct file *file, struct socket *sock,
        mask = 0;
 
        /* exceptional events? */
-       if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
+       if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue))
                mask |= EPOLLERR |
                        (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);
 
@@ -777,7 +777,7 @@ __poll_t datagram_poll(struct file *file, struct socket *sock,
                mask |= EPOLLHUP;
 
        /* readable? */
-       if (!skb_queue_empty(&sk->sk_receive_queue))
+       if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
                mask |= EPOLLIN | EPOLLRDNORM;
 
        /* Connection-based need to check for termination and startup */
index bf3ed413abafe121d8c20ea4ca023ea770c111cc..99ac84ff398f42ff171d5882991724f2f4df11a4 100644 (file)
 #include "net-sysfs.h"
 
 #define MAX_GRO_SKBS 8
+#define MAX_NEST_DEV 8
 
 /* This should be increased if a protocol with a bigger head is added. */
 #define GRO_MAX_HEAD (MAX_HEADER + 128)
@@ -276,88 +277,6 @@ static RAW_NOTIFIER_HEAD(netdev_chain);
 DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
 EXPORT_PER_CPU_SYMBOL(softnet_data);
 
-#ifdef CONFIG_LOCKDEP
-/*
- * register_netdevice() inits txq->_xmit_lock and sets lockdep class
- * according to dev->type
- */
-static const unsigned short netdev_lock_type[] = {
-        ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
-        ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
-        ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
-        ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
-        ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
-        ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
-        ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
-        ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
-        ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
-        ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
-        ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
-        ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
-        ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
-        ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
-        ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};
-
-static const char *const netdev_lock_name[] = {
-       "_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
-       "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
-       "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
-       "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
-       "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
-       "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
-       "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
-       "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
-       "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
-       "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
-       "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
-       "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
-       "_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
-       "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
-       "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};
-
-static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
-static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];
-
-static inline unsigned short netdev_lock_pos(unsigned short dev_type)
-{
-       int i;
-
-       for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
-               if (netdev_lock_type[i] == dev_type)
-                       return i;
-       /* the last key is used by default */
-       return ARRAY_SIZE(netdev_lock_type) - 1;
-}
-
-static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
-                                                unsigned short dev_type)
-{
-       int i;
-
-       i = netdev_lock_pos(dev_type);
-       lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
-                                  netdev_lock_name[i]);
-}
-
-static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
-{
-       int i;
-
-       i = netdev_lock_pos(dev->type);
-       lockdep_set_class_and_name(&dev->addr_list_lock,
-                                  &netdev_addr_lock_key[i],
-                                  netdev_lock_name[i]);
-}
-#else
-static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
-                                                unsigned short dev_type)
-{
-}
-static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
-{
-}
-#endif
-
 /*******************************************************************************
  *
  *             Protocol management and registration routines
@@ -6489,6 +6408,9 @@ struct netdev_adjacent {
        /* upper master flag, there can only be one master device per list */
        bool master;
 
+       /* lookup ignore flag */
+       bool ignore;
+
        /* counter for the number of times this device was added to us */
        u16 ref_nr;
 
@@ -6511,7 +6433,7 @@ static struct netdev_adjacent *__netdev_find_adj(struct net_device *adj_dev,
        return NULL;
 }
 
-static int __netdev_has_upper_dev(struct net_device *upper_dev, void *data)
+static int ____netdev_has_upper_dev(struct net_device *upper_dev, void *data)
 {
        struct net_device *dev = data;
 
@@ -6532,7 +6454,7 @@ bool netdev_has_upper_dev(struct net_device *dev,
 {
        ASSERT_RTNL();
 
-       return netdev_walk_all_upper_dev_rcu(dev, __netdev_has_upper_dev,
+       return netdev_walk_all_upper_dev_rcu(dev, ____netdev_has_upper_dev,
                                             upper_dev);
 }
 EXPORT_SYMBOL(netdev_has_upper_dev);
@@ -6550,7 +6472,7 @@ EXPORT_SYMBOL(netdev_has_upper_dev);
 bool netdev_has_upper_dev_all_rcu(struct net_device *dev,
                                  struct net_device *upper_dev)
 {
-       return !!netdev_walk_all_upper_dev_rcu(dev, __netdev_has_upper_dev,
+       return !!netdev_walk_all_upper_dev_rcu(dev, ____netdev_has_upper_dev,
                                               upper_dev);
 }
 EXPORT_SYMBOL(netdev_has_upper_dev_all_rcu);
@@ -6594,6 +6516,22 @@ struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
 }
 EXPORT_SYMBOL(netdev_master_upper_dev_get);
 
+static struct net_device *__netdev_master_upper_dev_get(struct net_device *dev)
+{
+       struct netdev_adjacent *upper;
+
+       ASSERT_RTNL();
+
+       if (list_empty(&dev->adj_list.upper))
+               return NULL;
+
+       upper = list_first_entry(&dev->adj_list.upper,
+                                struct netdev_adjacent, list);
+       if (likely(upper->master) && !upper->ignore)
+               return upper->dev;
+       return NULL;
+}
+
 /**
  * netdev_has_any_lower_dev - Check if device is linked to some device
  * @dev: device
@@ -6644,6 +6582,23 @@ struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
 }
 EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu);
 
+static struct net_device *__netdev_next_upper_dev(struct net_device *dev,
+                                                 struct list_head **iter,
+                                                 bool *ignore)
+{
+       struct netdev_adjacent *upper;
+
+       upper = list_entry((*iter)->next, struct netdev_adjacent, list);
+
+       if (&upper->list == &dev->adj_list.upper)
+               return NULL;
+
+       *iter = &upper->list;
+       *ignore = upper->ignore;
+
+       return upper->dev;
+}
+
 static struct net_device *netdev_next_upper_dev_rcu(struct net_device *dev,
                                                    struct list_head **iter)
 {
@@ -6661,34 +6616,111 @@ static struct net_device *netdev_next_upper_dev_rcu(struct net_device *dev,
        return upper->dev;
 }
 
+static int __netdev_walk_all_upper_dev(struct net_device *dev,
+                                      int (*fn)(struct net_device *dev,
+                                                void *data),
+                                      void *data)
+{
+       struct net_device *udev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
+       struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
+       int ret, cur = 0;
+       bool ignore;
+
+       now = dev;
+       iter = &dev->adj_list.upper;
+
+       while (1) {
+               if (now != dev) {
+                       ret = fn(now, data);
+                       if (ret)
+                               return ret;
+               }
+
+               next = NULL;
+               while (1) {
+                       udev = __netdev_next_upper_dev(now, &iter, &ignore);
+                       if (!udev)
+                               break;
+                       if (ignore)
+                               continue;
+
+                       next = udev;
+                       niter = &udev->adj_list.upper;
+                       dev_stack[cur] = now;
+                       iter_stack[cur++] = iter;
+                       break;
+               }
+
+               if (!next) {
+                       if (!cur)
+                               return 0;
+                       next = dev_stack[--cur];
+                       niter = iter_stack[cur];
+               }
+
+               now = next;
+               iter = niter;
+       }
+
+       return 0;
+}
+
 int netdev_walk_all_upper_dev_rcu(struct net_device *dev,
                                  int (*fn)(struct net_device *dev,
                                            void *data),
                                  void *data)
 {
-       struct net_device *udev;
-       struct list_head *iter;
-       int ret;
+       struct net_device *udev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
+       struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
+       int ret, cur = 0;
 
-       for (iter = &dev->adj_list.upper,
-            udev = netdev_next_upper_dev_rcu(dev, &iter);
-            udev;
-            udev = netdev_next_upper_dev_rcu(dev, &iter)) {
-               /* first is the upper device itself */
-               ret = fn(udev, data);
-               if (ret)
-                       return ret;
+       now = dev;
+       iter = &dev->adj_list.upper;
 
-               /* then look at all of its upper devices */
-               ret = netdev_walk_all_upper_dev_rcu(udev, fn, data);
-               if (ret)
-                       return ret;
+       while (1) {
+               if (now != dev) {
+                       ret = fn(now, data);
+                       if (ret)
+                               return ret;
+               }
+
+               next = NULL;
+               while (1) {
+                       udev = netdev_next_upper_dev_rcu(now, &iter);
+                       if (!udev)
+                               break;
+
+                       next = udev;
+                       niter = &udev->adj_list.upper;
+                       dev_stack[cur] = now;
+                       iter_stack[cur++] = iter;
+                       break;
+               }
+
+               if (!next) {
+                       if (!cur)
+                               return 0;
+                       next = dev_stack[--cur];
+                       niter = iter_stack[cur];
+               }
+
+               now = next;
+               iter = niter;
        }
 
        return 0;
 }
 EXPORT_SYMBOL_GPL(netdev_walk_all_upper_dev_rcu);
 
+static bool __netdev_has_upper_dev(struct net_device *dev,
+                                  struct net_device *upper_dev)
+{
+       ASSERT_RTNL();
+
+       return __netdev_walk_all_upper_dev(dev, ____netdev_has_upper_dev,
+                                          upper_dev);
+}
+
 /**
  * netdev_lower_get_next_private - Get the next ->private from the
  *                                lower neighbour list
@@ -6785,34 +6817,119 @@ static struct net_device *netdev_next_lower_dev(struct net_device *dev,
        return lower->dev;
 }
 
+static struct net_device *__netdev_next_lower_dev(struct net_device *dev,
+                                                 struct list_head **iter,
+                                                 bool *ignore)
+{
+       struct netdev_adjacent *lower;
+
+       lower = list_entry((*iter)->next, struct netdev_adjacent, list);
+
+       if (&lower->list == &dev->adj_list.lower)
+               return NULL;
+
+       *iter = &lower->list;
+       *ignore = lower->ignore;
+
+       return lower->dev;
+}
+
 int netdev_walk_all_lower_dev(struct net_device *dev,
                              int (*fn)(struct net_device *dev,
                                        void *data),
                              void *data)
 {
-       struct net_device *ldev;
-       struct list_head *iter;
-       int ret;
+       struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
+       struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
+       int ret, cur = 0;
 
-       for (iter = &dev->adj_list.lower,
-            ldev = netdev_next_lower_dev(dev, &iter);
-            ldev;
-            ldev = netdev_next_lower_dev(dev, &iter)) {
-               /* first is the lower device itself */
-               ret = fn(ldev, data);
-               if (ret)
-                       return ret;
+       now = dev;
+       iter = &dev->adj_list.lower;
 
-               /* then look at all of its lower devices */
-               ret = netdev_walk_all_lower_dev(ldev, fn, data);
-               if (ret)
-                       return ret;
+       while (1) {
+               if (now != dev) {
+                       ret = fn(now, data);
+                       if (ret)
+                               return ret;
+               }
+
+               next = NULL;
+               while (1) {
+                       ldev = netdev_next_lower_dev(now, &iter);
+                       if (!ldev)
+                               break;
+
+                       next = ldev;
+                       niter = &ldev->adj_list.lower;
+                       dev_stack[cur] = now;
+                       iter_stack[cur++] = iter;
+                       break;
+               }
+
+               if (!next) {
+                       if (!cur)
+                               return 0;
+                       next = dev_stack[--cur];
+                       niter = iter_stack[cur];
+               }
+
+               now = next;
+               iter = niter;
        }
 
        return 0;
 }
 EXPORT_SYMBOL_GPL(netdev_walk_all_lower_dev);
 
+static int __netdev_walk_all_lower_dev(struct net_device *dev,
+                                      int (*fn)(struct net_device *dev,
+                                                void *data),
+                                      void *data)
+{
+       struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
+       struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
+       int ret, cur = 0;
+       bool ignore;
+
+       now = dev;
+       iter = &dev->adj_list.lower;
+
+       while (1) {
+               if (now != dev) {
+                       ret = fn(now, data);
+                       if (ret)
+                               return ret;
+               }
+
+               next = NULL;
+               while (1) {
+                       ldev = __netdev_next_lower_dev(now, &iter, &ignore);
+                       if (!ldev)
+                               break;
+                       if (ignore)
+                               continue;
+
+                       next = ldev;
+                       niter = &ldev->adj_list.lower;
+                       dev_stack[cur] = now;
+                       iter_stack[cur++] = iter;
+                       break;
+               }
+
+               if (!next) {
+                       if (!cur)
+                               return 0;
+                       next = dev_stack[--cur];
+                       niter = iter_stack[cur];
+               }
+
+               now = next;
+               iter = niter;
+       }
+
+       return 0;
+}
+
 static struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev,
                                                    struct list_head **iter)
 {
@@ -6827,28 +6944,99 @@ static struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev,
        return lower->dev;
 }
 
-int netdev_walk_all_lower_dev_rcu(struct net_device *dev,
-                                 int (*fn)(struct net_device *dev,
-                                           void *data),
-                                 void *data)
+static u8 __netdev_upper_depth(struct net_device *dev)
+{
+       struct net_device *udev;
+       struct list_head *iter;
+       u8 max_depth = 0;
+       bool ignore;
+
+       for (iter = &dev->adj_list.upper,
+            udev = __netdev_next_upper_dev(dev, &iter, &ignore);
+            udev;
+            udev = __netdev_next_upper_dev(dev, &iter, &ignore)) {
+               if (ignore)
+                       continue;
+               if (max_depth < udev->upper_level)
+                       max_depth = udev->upper_level;
+       }
+
+       return max_depth;
+}
+
+static u8 __netdev_lower_depth(struct net_device *dev)
 {
        struct net_device *ldev;
        struct list_head *iter;
-       int ret;
+       u8 max_depth = 0;
+       bool ignore;
 
        for (iter = &dev->adj_list.lower,
-            ldev = netdev_next_lower_dev_rcu(dev, &iter);
+            ldev = __netdev_next_lower_dev(dev, &iter, &ignore);
             ldev;
-            ldev = netdev_next_lower_dev_rcu(dev, &iter)) {
-               /* first is the lower device itself */
-               ret = fn(ldev, data);
-               if (ret)
-                       return ret;
+            ldev = __netdev_next_lower_dev(dev, &iter, &ignore)) {
+               if (ignore)
+                       continue;
+               if (max_depth < ldev->lower_level)
+                       max_depth = ldev->lower_level;
+       }
 
-               /* then look at all of its lower devices */
-               ret = netdev_walk_all_lower_dev_rcu(ldev, fn, data);
-               if (ret)
-                       return ret;
+       return max_depth;
+}
+
+static int __netdev_update_upper_level(struct net_device *dev, void *data)
+{
+       dev->upper_level = __netdev_upper_depth(dev) + 1;
+       return 0;
+}
+
+static int __netdev_update_lower_level(struct net_device *dev, void *data)
+{
+       dev->lower_level = __netdev_lower_depth(dev) + 1;
+       return 0;
+}
+
+int netdev_walk_all_lower_dev_rcu(struct net_device *dev,
+                                 int (*fn)(struct net_device *dev,
+                                           void *data),
+                                 void *data)
+{
+       struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
+       struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
+       int ret, cur = 0;
+
+       now = dev;
+       iter = &dev->adj_list.lower;
+
+       while (1) {
+               if (now != dev) {
+                       ret = fn(now, data);
+                       if (ret)
+                               return ret;
+               }
+
+               next = NULL;
+               while (1) {
+                       ldev = netdev_next_lower_dev_rcu(now, &iter);
+                       if (!ldev)
+                               break;
+
+                       next = ldev;
+                       niter = &ldev->adj_list.lower;
+                       dev_stack[cur] = now;
+                       iter_stack[cur++] = iter;
+                       break;
+               }
+
+               if (!next) {
+                       if (!cur)
+                               return 0;
+                       next = dev_stack[--cur];
+                       niter = iter_stack[cur];
+               }
+
+               now = next;
+               iter = niter;
        }
 
        return 0;
@@ -6952,6 +7140,7 @@ static int __netdev_adjacent_dev_insert(struct net_device *dev,
        adj->master = master;
        adj->ref_nr = 1;
        adj->private = private;
+       adj->ignore = false;
        dev_hold(adj_dev);
 
        pr_debug("Insert adjacency: dev %s adj_dev %s adj->ref_nr %d; dev_hold on %s\n",
@@ -7102,14 +7291,17 @@ static int __netdev_upper_dev_link(struct net_device *dev,
                return -EBUSY;
 
        /* To prevent loops, check if dev is not upper device to upper_dev. */
-       if (netdev_has_upper_dev(upper_dev, dev))
+       if (__netdev_has_upper_dev(upper_dev, dev))
                return -EBUSY;
 
+       if ((dev->lower_level + upper_dev->upper_level) > MAX_NEST_DEV)
+               return -EMLINK;
+
        if (!master) {
-               if (netdev_has_upper_dev(dev, upper_dev))
+               if (__netdev_has_upper_dev(dev, upper_dev))
                        return -EEXIST;
        } else {
-               master_dev = netdev_master_upper_dev_get(dev);
+               master_dev = __netdev_master_upper_dev_get(dev);
                if (master_dev)
                        return master_dev == upper_dev ? -EEXIST : -EBUSY;
        }
@@ -7131,6 +7323,13 @@ static int __netdev_upper_dev_link(struct net_device *dev,
        if (ret)
                goto rollback;
 
+       __netdev_update_upper_level(dev, NULL);
+       __netdev_walk_all_lower_dev(dev, __netdev_update_upper_level, NULL);
+
+       __netdev_update_lower_level(upper_dev, NULL);
+       __netdev_walk_all_upper_dev(upper_dev, __netdev_update_lower_level,
+                                   NULL);
+
        return 0;
 
 rollback:
@@ -7213,9 +7412,96 @@ void netdev_upper_dev_unlink(struct net_device *dev,
 
        call_netdevice_notifiers_info(NETDEV_CHANGEUPPER,
                                      &changeupper_info.info);
+
+       __netdev_update_upper_level(dev, NULL);
+       __netdev_walk_all_lower_dev(dev, __netdev_update_upper_level, NULL);
+
+       __netdev_update_lower_level(upper_dev, NULL);
+       __netdev_walk_all_upper_dev(upper_dev, __netdev_update_lower_level,
+                                   NULL);
 }
 EXPORT_SYMBOL(netdev_upper_dev_unlink);
 
+static void __netdev_adjacent_dev_set(struct net_device *upper_dev,
+                                     struct net_device *lower_dev,
+                                     bool val)
+{
+       struct netdev_adjacent *adj;
+
+       adj = __netdev_find_adj(lower_dev, &upper_dev->adj_list.lower);
+       if (adj)
+               adj->ignore = val;
+
+       adj = __netdev_find_adj(upper_dev, &lower_dev->adj_list.upper);
+       if (adj)
+               adj->ignore = val;
+}
+
+static void netdev_adjacent_dev_disable(struct net_device *upper_dev,
+                                       struct net_device *lower_dev)
+{
+       __netdev_adjacent_dev_set(upper_dev, lower_dev, true);
+}
+
+static void netdev_adjacent_dev_enable(struct net_device *upper_dev,
+                                      struct net_device *lower_dev)
+{
+       __netdev_adjacent_dev_set(upper_dev, lower_dev, false);
+}
+
+int netdev_adjacent_change_prepare(struct net_device *old_dev,
+                                  struct net_device *new_dev,
+                                  struct net_device *dev,
+                                  struct netlink_ext_ack *extack)
+{
+       int err;
+
+       if (!new_dev)
+               return 0;
+
+       if (old_dev && new_dev != old_dev)
+               netdev_adjacent_dev_disable(dev, old_dev);
+
+       err = netdev_upper_dev_link(new_dev, dev, extack);
+       if (err) {
+               if (old_dev && new_dev != old_dev)
+                       netdev_adjacent_dev_enable(dev, old_dev);
+               return err;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL(netdev_adjacent_change_prepare);
+
+void netdev_adjacent_change_commit(struct net_device *old_dev,
+                                  struct net_device *new_dev,
+                                  struct net_device *dev)
+{
+       if (!new_dev || !old_dev)
+               return;
+
+       if (new_dev == old_dev)
+               return;
+
+       netdev_adjacent_dev_enable(dev, old_dev);
+       netdev_upper_dev_unlink(old_dev, dev);
+}
+EXPORT_SYMBOL(netdev_adjacent_change_commit);
+
+void netdev_adjacent_change_abort(struct net_device *old_dev,
+                                 struct net_device *new_dev,
+                                 struct net_device *dev)
+{
+       if (!new_dev)
+               return;
+
+       if (old_dev && new_dev != old_dev)
+               netdev_adjacent_dev_enable(dev, old_dev);
+
+       netdev_upper_dev_unlink(new_dev, dev);
+}
+EXPORT_SYMBOL(netdev_adjacent_change_abort);
+
 /**
  * netdev_bonding_info_change - Dispatch event about slave change
  * @dev: device
@@ -7329,25 +7615,6 @@ void *netdev_lower_dev_get_private(struct net_device *dev,
 EXPORT_SYMBOL(netdev_lower_dev_get_private);
 
 
-int dev_get_nest_level(struct net_device *dev)
-{
-       struct net_device *lower = NULL;
-       struct list_head *iter;
-       int max_nest = -1;
-       int nest;
-
-       ASSERT_RTNL();
-
-       netdev_for_each_lower_dev(dev, lower, iter) {
-               nest = dev_get_nest_level(lower);
-               if (max_nest < nest)
-                       max_nest = nest;
-       }
-
-       return max_nest + 1;
-}
-EXPORT_SYMBOL(dev_get_nest_level);
-
 /**
  * netdev_lower_change - Dispatch event about lower device state change
  * @lower_dev: device
@@ -8154,7 +8421,8 @@ int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
                        return -EINVAL;
                }
 
-               if (prog->aux->id == prog_id) {
+               /* prog->aux->id may be 0 for orphaned device-bound progs */
+               if (prog->aux->id && prog->aux->id == prog_id) {
                        bpf_prog_put(prog);
                        return 0;
                }
@@ -8619,7 +8887,7 @@ static void netdev_init_one_queue(struct net_device *dev,
 {
        /* Initialize queue lock */
        spin_lock_init(&queue->_xmit_lock);
-       netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
+       lockdep_set_class(&queue->_xmit_lock, &dev->qdisc_xmit_lock_key);
        queue->xmit_lock_owner = -1;
        netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
        queue->dev = dev;
@@ -8666,6 +8934,43 @@ void netif_tx_stop_all_queues(struct net_device *dev)
 }
 EXPORT_SYMBOL(netif_tx_stop_all_queues);
 
+static void netdev_register_lockdep_key(struct net_device *dev)
+{
+       lockdep_register_key(&dev->qdisc_tx_busylock_key);
+       lockdep_register_key(&dev->qdisc_running_key);
+       lockdep_register_key(&dev->qdisc_xmit_lock_key);
+       lockdep_register_key(&dev->addr_list_lock_key);
+}
+
+static void netdev_unregister_lockdep_key(struct net_device *dev)
+{
+       lockdep_unregister_key(&dev->qdisc_tx_busylock_key);
+       lockdep_unregister_key(&dev->qdisc_running_key);
+       lockdep_unregister_key(&dev->qdisc_xmit_lock_key);
+       lockdep_unregister_key(&dev->addr_list_lock_key);
+}
+
+void netdev_update_lockdep_key(struct net_device *dev)
+{
+       struct netdev_queue *queue;
+       int i;
+
+       lockdep_unregister_key(&dev->qdisc_xmit_lock_key);
+       lockdep_unregister_key(&dev->addr_list_lock_key);
+
+       lockdep_register_key(&dev->qdisc_xmit_lock_key);
+       lockdep_register_key(&dev->addr_list_lock_key);
+
+       lockdep_set_class(&dev->addr_list_lock, &dev->addr_list_lock_key);
+       for (i = 0; i < dev->num_tx_queues; i++) {
+               queue = netdev_get_tx_queue(dev, i);
+
+               lockdep_set_class(&queue->_xmit_lock,
+                                 &dev->qdisc_xmit_lock_key);
+       }
+}
+EXPORT_SYMBOL(netdev_update_lockdep_key);
+
 /**
  *     register_netdevice      - register a network device
  *     @dev: device to register
@@ -8700,7 +9005,7 @@ int register_netdevice(struct net_device *dev)
        BUG_ON(!net);
 
        spin_lock_init(&dev->addr_list_lock);
-       netdev_set_addr_lockdep_class(dev);
+       lockdep_set_class(&dev->addr_list_lock, &dev->addr_list_lock_key);
 
        ret = dev_get_valid_name(net, dev, dev->name);
        if (ret < 0)
@@ -9210,8 +9515,12 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
 
        dev_net_set(dev, &init_net);
 
+       netdev_register_lockdep_key(dev);
+
        dev->gso_max_size = GSO_MAX_SIZE;
        dev->gso_max_segs = GSO_MAX_SEGS;
+       dev->upper_level = 1;
+       dev->lower_level = 1;
 
        INIT_LIST_HEAD(&dev->napi_list);
        INIT_LIST_HEAD(&dev->unreg_list);
@@ -9292,6 +9601,8 @@ void free_netdev(struct net_device *dev)
        free_percpu(dev->pcpu_refcnt);
        dev->pcpu_refcnt = NULL;
 
+       netdev_unregister_lockdep_key(dev);
+
        /*  Compatibility with error handling in drivers */
        if (dev->reg_state == NETREG_UNINITIALIZED) {
                netdev_freemem(dev);
@@ -9460,7 +9771,7 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
        call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
        rcu_barrier();
 
-       new_nsid = peernet2id_alloc(dev_net(dev), net);
+       new_nsid = peernet2id_alloc(dev_net(dev), net, GFP_KERNEL);
        /* If there is an ifindex conflict assign a new one */
        if (__dev_get_by_index(net, dev->ifindex))
                new_ifindex = dev_new_index(net);
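
The net/core/dev.c changes above replace the recursive adjacency walks with iterative ones that keep their own stack of at most MAX_NEST_DEV entries, and add upper_level/lower_level counters so __netdev_upper_dev_link() can reject stackings deeper than eight devices with -EMLINK. A minimal sketch of the depth check, assuming the counters are maintained as in the hunks above:

/* Sketch: each device tracks how many levels sit above it (upper_level)
 * and below it (lower_level); a new link is refused when chaining the two
 * existing stacks would exceed MAX_NEST_DEV.
 */
#define MAX_NEST_DEV 8

static bool link_would_nest_too_deep(u8 dev_lower_level, u8 upper_upper_level)
{
        return (dev_lower_level + upper_upper_level) > MAX_NEST_DEV;
}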
index 6393ba930097b49b26a70cf01b657da08514f552..2f949b5a1eb9cd9a65e0c0a6d089032a92e100e2 100644 (file)
@@ -637,7 +637,7 @@ int dev_uc_sync(struct net_device *to, struct net_device *from)
        if (to->addr_len != from->addr_len)
                return -EINVAL;
 
-       netif_addr_lock_nested(to);
+       netif_addr_lock(to);
        err = __hw_addr_sync(&to->uc, &from->uc, to->addr_len);
        if (!err)
                __dev_set_rx_mode(to);
@@ -667,7 +667,7 @@ int dev_uc_sync_multiple(struct net_device *to, struct net_device *from)
        if (to->addr_len != from->addr_len)
                return -EINVAL;
 
-       netif_addr_lock_nested(to);
+       netif_addr_lock(to);
        err = __hw_addr_sync_multiple(&to->uc, &from->uc, to->addr_len);
        if (!err)
                __dev_set_rx_mode(to);
@@ -691,7 +691,7 @@ void dev_uc_unsync(struct net_device *to, struct net_device *from)
                return;
 
        netif_addr_lock_bh(from);
-       netif_addr_lock_nested(to);
+       netif_addr_lock(to);
        __hw_addr_unsync(&to->uc, &from->uc, to->addr_len);
        __dev_set_rx_mode(to);
        netif_addr_unlock(to);
@@ -858,7 +858,7 @@ int dev_mc_sync(struct net_device *to, struct net_device *from)
        if (to->addr_len != from->addr_len)
                return -EINVAL;
 
-       netif_addr_lock_nested(to);
+       netif_addr_lock(to);
        err = __hw_addr_sync(&to->mc, &from->mc, to->addr_len);
        if (!err)
                __dev_set_rx_mode(to);
@@ -888,7 +888,7 @@ int dev_mc_sync_multiple(struct net_device *to, struct net_device *from)
        if (to->addr_len != from->addr_len)
                return -EINVAL;
 
-       netif_addr_lock_nested(to);
+       netif_addr_lock(to);
        err = __hw_addr_sync_multiple(&to->mc, &from->mc, to->addr_len);
        if (!err)
                __dev_set_rx_mode(to);
@@ -912,7 +912,7 @@ void dev_mc_unsync(struct net_device *to, struct net_device *from)
                return;
 
        netif_addr_lock_bh(from);
-       netif_addr_lock_nested(to);
+       netif_addr_lock(to);
        __hw_addr_unsync(&to->mc, &from->mc, to->addr_len);
        __dev_set_rx_mode(to);
        netif_addr_unlock(to);
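
With every device now carrying dynamically registered lockdep keys, the address-list lock no longer needs the _nested annotation, so dev_addr_lists.c can take plain netif_addr_lock(). A minimal sketch of per-instance key registration using the lockdep API (lockdep_register_key()/lockdep_set_class()); struct my_dev is a stand-in, not a kernel type:

/* Sketch: one lockdep key per object instead of one static key shared by
 * the whole class, so taking two different objects' locks at once is no
 * longer reported as a self-deadlock by lockdep.
 */
struct my_dev {
        spinlock_t addr_lock;
        struct lock_class_key addr_lock_key;    /* per-instance key */
};

static void my_dev_init(struct my_dev *d)
{
        lockdep_register_key(&d->addr_lock_key);
        spin_lock_init(&d->addr_lock);
        lockdep_set_class(&d->addr_lock, &d->addr_lock_key);
}

static void my_dev_destroy(struct my_dev *d)
{
        lockdep_unregister_key(&d->addr_lock_key);
}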
index e48680efe54a92bb19c5d7902fb25c16a9ce1d8a..f80151eeaf51691e762e9a5c46b70f4c258c8c07 100644 (file)
@@ -3172,7 +3172,7 @@ static int devlink_nl_cmd_param_get_dumpit(struct sk_buff *msg,
                                                    NETLINK_CB(cb->skb).portid,
                                                    cb->nlh->nlmsg_seq,
                                                    NLM_F_MULTI);
-                       if (err) {
+                       if (err && err != -EOPNOTSUPP) {
                                mutex_unlock(&devlink->lock);
                                goto out;
                        }
@@ -3432,7 +3432,7 @@ static int devlink_nl_cmd_port_param_get_dumpit(struct sk_buff *msg,
                                                NETLINK_CB(cb->skb).portid,
                                                cb->nlh->nlmsg_seq,
                                                NLM_F_MULTI);
-                               if (err) {
+                               if (err && err != -EOPNOTSUPP) {
                                        mutex_unlock(&devlink->lock);
                                        goto out;
                                }
@@ -4088,7 +4088,7 @@ static int devlink_nl_cmd_info_get_dumpit(struct sk_buff *msg,
                                           cb->nlh->nlmsg_seq, NLM_F_MULTI,
                                           cb->extack);
                mutex_unlock(&devlink->lock);
-               if (err)
+               if (err && err != -EOPNOTSUPP)
                        break;
                idx++;
        }
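
The devlink dump handlers above now treat -EOPNOTSUPP from a single object as "nothing to report" and continue, rather than aborting the whole dump. A minimal sketch of that skip-on-unsupported loop; struct object and fill_one() are hypothetical:

/* Sketch: during a dump, an unsupported entry is skipped; only real
 * errors terminate the iteration.
 */
struct object;

static int dump_all(struct object **objs, int n,
                    int (*fill_one)(struct object *))
{
        int i, err;

        for (i = 0; i < n; i++) {
                err = fill_one(objs[i]);
                if (err && err != -EOPNOTSUPP)
                        return err;     /* real error: abort the dump */
                /* -EOPNOTSUPP: this object has nothing to report */
        }
        return 0;
}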
index c763106c73fc5d660e8489da0f0823e0536fd96e..cd9bc67381b221aae31695f15dec0784973c5da9 100644 (file)
@@ -1396,11 +1396,13 @@ static int ethtool_reset(struct net_device *dev, char __user *useraddr)
 
 static int ethtool_get_wol(struct net_device *dev, char __user *useraddr)
 {
-       struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
+       struct ethtool_wolinfo wol;
 
        if (!dev->ethtool_ops->get_wol)
                return -EOPNOTSUPP;
 
+       memset(&wol, 0, sizeof(struct ethtool_wolinfo));
+       wol.cmd = ETHTOOL_GWOL;
        dev->ethtool_ops->get_wol(dev, &wol);
 
        if (copy_to_user(useraddr, &wol, sizeof(wol)))
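
ethtool_get_wol() now zeroes the whole struct ethtool_wolinfo before the driver fills in the fields it supports, closing a kernel-stack infoleak through copy_to_user(). A minimal sketch of the pattern; struct info and get_info() are hypothetical:

/* Sketch: zero a stack structure before a driver callback partially
 * fills it and before it is copied to user space, so no stale stack
 * bytes escape.
 */
struct info { u32 cmd; u32 supported; u8 pad[32]; };

static int query_info(void (*get_info)(struct info *), void __user *useraddr)
{
        struct info info;

        memset(&info, 0, sizeof(info));
        info.cmd = 0x1234;              /* hypothetical command id */
        get_info(&info);                /* driver fills only what it knows */

        return copy_to_user(useraddr, &info, sizeof(info)) ? -EFAULT : 0;
}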
index ed6563622ce31dcced4e6ba622770e26f1f7756a..3fed5755494bd39cf55ca1806ead67609ae8b587 100644 (file)
@@ -4252,12 +4252,14 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
                case SO_RCVBUF:
                        val = min_t(u32, val, sysctl_rmem_max);
                        sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
-                       sk->sk_rcvbuf = max_t(int, val * 2, SOCK_MIN_RCVBUF);
+                       WRITE_ONCE(sk->sk_rcvbuf,
+                                  max_t(int, val * 2, SOCK_MIN_RCVBUF));
                        break;
                case SO_SNDBUF:
                        val = min_t(u32, val, sysctl_wmem_max);
                        sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
-                       sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF);
+                       WRITE_ONCE(sk->sk_sndbuf,
+                                  max_t(int, val * 2, SOCK_MIN_SNDBUF));
                        break;
                case SO_MAX_PACING_RATE: /* 32bit version */
                        if (val != ~0U)
@@ -4274,7 +4276,7 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
                case SO_RCVLOWAT:
                        if (val < 0)
                                val = INT_MAX;
-                       sk->sk_rcvlowat = val ? : 1;
+                       WRITE_ONCE(sk->sk_rcvlowat, val ? : 1);
                        break;
                case SO_MARK:
                        if (sk->sk_mark != val) {
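
bpf_setsockopt() switches to WRITE_ONCE() for sk_rcvbuf, sk_sndbuf and sk_rcvlowat because other paths read those fields without the socket lock; the annotation pairs with READ_ONCE() on the reader side and prevents store tearing. A minimal sketch of the pairing; 'limit' stands in for one of those fields:

/* Sketch: a field written under a lock but read locklessly elsewhere.
 * WRITE_ONCE() on the writer pairs with READ_ONCE() on the reader.
 */
static int limit;

static void writer_update(int val)
{
        WRITE_ONCE(limit, max(val * 2, 256));   /* floor like SOCK_MIN_RCVBUF */
}

static bool reader_over_limit(int used)
{
        return used >= READ_ONCE(limit);        /* lockless reader */
}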
index 7c09d87d3269e4d01fd895c595a3a6e67d350685..68eda10d0680749ef07372951debab913449e661 100644 (file)
@@ -1350,30 +1350,21 @@ out_bad:
 }
 EXPORT_SYMBOL(__skb_flow_dissect);
 
-static u32 hashrnd __read_mostly;
+static siphash_key_t hashrnd __read_mostly;
 static __always_inline void __flow_hash_secret_init(void)
 {
        net_get_random_once(&hashrnd, sizeof(hashrnd));
 }
 
-static __always_inline u32 __flow_hash_words(const u32 *words, u32 length,
-                                            u32 keyval)
+static const void *flow_keys_hash_start(const struct flow_keys *flow)
 {
-       return jhash2(words, length, keyval);
-}
-
-static inline const u32 *flow_keys_hash_start(const struct flow_keys *flow)
-{
-       const void *p = flow;
-
-       BUILD_BUG_ON(FLOW_KEYS_HASH_OFFSET % sizeof(u32));
-       return (const u32 *)(p + FLOW_KEYS_HASH_OFFSET);
+       BUILD_BUG_ON(FLOW_KEYS_HASH_OFFSET % SIPHASH_ALIGNMENT);
+       return &flow->FLOW_KEYS_HASH_START_FIELD;
 }
 
 static inline size_t flow_keys_hash_length(const struct flow_keys *flow)
 {
        size_t diff = FLOW_KEYS_HASH_OFFSET + sizeof(flow->addrs);
-       BUILD_BUG_ON((sizeof(*flow) - FLOW_KEYS_HASH_OFFSET) % sizeof(u32));
        BUILD_BUG_ON(offsetof(typeof(*flow), addrs) !=
                     sizeof(*flow) - sizeof(flow->addrs));
 
@@ -1388,7 +1379,7 @@ static inline size_t flow_keys_hash_length(const struct flow_keys *flow)
                diff -= sizeof(flow->addrs.tipckey);
                break;
        }
-       return (sizeof(*flow) - diff) / sizeof(u32);
+       return sizeof(*flow) - diff;
 }
 
 __be32 flow_get_u32_src(const struct flow_keys *flow)
@@ -1454,14 +1445,15 @@ static inline void __flow_hash_consistentify(struct flow_keys *keys)
        }
 }
 
-static inline u32 __flow_hash_from_keys(struct flow_keys *keys, u32 keyval)
+static inline u32 __flow_hash_from_keys(struct flow_keys *keys,
+                                       const siphash_key_t *keyval)
 {
        u32 hash;
 
        __flow_hash_consistentify(keys);
 
-       hash = __flow_hash_words(flow_keys_hash_start(keys),
-                                flow_keys_hash_length(keys), keyval);
+       hash = siphash(flow_keys_hash_start(keys),
+                      flow_keys_hash_length(keys), keyval);
        if (!hash)
                hash = 1;
 
@@ -1471,12 +1463,13 @@ static inline u32 __flow_hash_from_keys(struct flow_keys *keys, u32 keyval)
 u32 flow_hash_from_keys(struct flow_keys *keys)
 {
        __flow_hash_secret_init();
-       return __flow_hash_from_keys(keys, hashrnd);
+       return __flow_hash_from_keys(keys, &hashrnd);
 }
 EXPORT_SYMBOL(flow_hash_from_keys);
 
 static inline u32 ___skb_get_hash(const struct sk_buff *skb,
-                                 struct flow_keys *keys, u32 keyval)
+                                 struct flow_keys *keys,
+                                 const siphash_key_t *keyval)
 {
        skb_flow_dissect_flow_keys(skb, keys,
                                   FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);
@@ -1524,7 +1517,7 @@ u32 __skb_get_hash_symmetric(const struct sk_buff *skb)
                           &keys, NULL, 0, 0, 0,
                           FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);
 
-       return __flow_hash_from_keys(&keys, hashrnd);
+       return __flow_hash_from_keys(&keys, &hashrnd);
 }
 EXPORT_SYMBOL_GPL(__skb_get_hash_symmetric);
 
@@ -1544,13 +1537,14 @@ void __skb_get_hash(struct sk_buff *skb)
 
        __flow_hash_secret_init();
 
-       hash = ___skb_get_hash(skb, &keys, hashrnd);
+       hash = ___skb_get_hash(skb, &keys, &hashrnd);
 
        __skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys));
 }
 EXPORT_SYMBOL(__skb_get_hash);
 
-__u32 skb_get_hash_perturb(const struct sk_buff *skb, u32 perturb)
+__u32 skb_get_hash_perturb(const struct sk_buff *skb,
+                          const siphash_key_t *perturb)
 {
        struct flow_keys keys;
 
index f93785e5833c1f82eed0176e3f939b32216dbaab..74cfb8b5ab3306485385e3e5682350fe9a0709dc 100644 (file)
@@ -88,11 +88,16 @@ static int bpf_lwt_input_reroute(struct sk_buff *skb)
        int err = -EINVAL;
 
        if (skb->protocol == htons(ETH_P_IP)) {
+               struct net_device *dev = skb_dst(skb)->dev;
                struct iphdr *iph = ip_hdr(skb);
 
+               dev_hold(dev);
+               skb_dst_drop(skb);
                err = ip_route_input_noref(skb, iph->daddr, iph->saddr,
-                                          iph->tos, skb_dst(skb)->dev);
+                                          iph->tos, dev);
+               dev_put(dev);
        } else if (skb->protocol == htons(ETH_P_IPV6)) {
+               skb_dst_drop(skb);
                err = ipv6_stub->ipv6_route_input(skb);
        } else {
                err = -EAFNOSUPPORT;
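
The bpf_lwt_input_reroute() hunk above takes its own reference on the input device with dev_hold() before skb_dst_drop() releases the dst that may hold the only reference to it, then drops that reference after the route lookup. A minimal userspace sketch of the general pattern, take your own reference before releasing the container you borrowed the pointer from; the types and the dev_hold()/dev_put() helpers here are toy reimplementations, not the kernel ones:

/* Sketch: grab a private reference before dropping the object that owns
 * the pointer you still intend to use. */
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

struct device { int refs; const char *name; };
struct route_cache { struct device *dev; };	/* owns one device ref */

static struct device *dev_hold(struct device *d) { d->refs++; return d; }

static void dev_put(struct device *d)
{
	if (--d->refs == 0) {
		printf("freeing device %s\n", d->name);
		free(d);
	}
}

static void cache_drop(struct route_cache *rc)
{
	dev_put(rc->dev);	/* releases the cache's reference */
	rc->dev = NULL;
}

static void reroute(struct route_cache *rc)
{
	struct device *dev = dev_hold(rc->dev);	/* our own reference */

	cache_drop(rc);				/* may drop the last owner ref */
	printf("looking up a new route on %s\n", dev->name);	/* still safe */
	dev_put(dev);
}

int main(void)
{
	struct device *d = malloc(sizeof(*d));
	struct route_cache rc;

	assert(d);
	d->refs = 1;
	d->name = "eth0";
	rc.dev = d;
	reroute(&rc);
	return 0;
}
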
index a0e0d298c9918f55afe34264c63d0f791160c898..39402840025e18682da44f40c509b265f9743fd1 100644 (file)
@@ -245,11 +245,12 @@ static int __peernet2id(struct net *net, struct net *peer)
        return __peernet2id_alloc(net, peer, &no);
 }
 
-static void rtnl_net_notifyid(struct net *net, int cmd, int id);
+static void rtnl_net_notifyid(struct net *net, int cmd, int id, u32 portid,
+                             struct nlmsghdr *nlh, gfp_t gfp);
 /* This function returns the id of a peer netns. If no id is assigned, one will
  * be allocated and returned.
  */
-int peernet2id_alloc(struct net *net, struct net *peer)
+int peernet2id_alloc(struct net *net, struct net *peer, gfp_t gfp)
 {
        bool alloc = false, alive = false;
        int id;
@@ -268,7 +269,7 @@ int peernet2id_alloc(struct net *net, struct net *peer)
        id = __peernet2id_alloc(net, peer, &alloc);
        spin_unlock_bh(&net->nsid_lock);
        if (alloc && id >= 0)
-               rtnl_net_notifyid(net, RTM_NEWNSID, id);
+               rtnl_net_notifyid(net, RTM_NEWNSID, id, 0, NULL, gfp);
        if (alive)
                put_net(peer);
        return id;
@@ -478,6 +479,7 @@ struct net *copy_net_ns(unsigned long flags,
 
        if (rv < 0) {
 put_userns:
+               key_remove_domain(net->key_domain);
                put_user_ns(user_ns);
                net_drop_ns(net);
 dec_ucounts:
@@ -532,7 +534,8 @@ static void unhash_nsid(struct net *net, struct net *last)
                        idr_remove(&tmp->netns_ids, id);
                spin_unlock_bh(&tmp->nsid_lock);
                if (id >= 0)
-                       rtnl_net_notifyid(tmp, RTM_DELNSID, id);
+                       rtnl_net_notifyid(tmp, RTM_DELNSID, id, 0, NULL,
+                                         GFP_KERNEL);
                if (tmp == last)
                        break;
        }
@@ -764,7 +767,8 @@ static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh,
        err = alloc_netid(net, peer, nsid);
        spin_unlock_bh(&net->nsid_lock);
        if (err >= 0) {
-               rtnl_net_notifyid(net, RTM_NEWNSID, err);
+               rtnl_net_notifyid(net, RTM_NEWNSID, err, NETLINK_CB(skb).portid,
+                                 nlh, GFP_KERNEL);
                err = 0;
        } else if (err == -ENOSPC && nsid >= 0) {
                err = -EEXIST;
@@ -1051,16 +1055,19 @@ end:
        return err < 0 ? err : skb->len;
 }
 
-static void rtnl_net_notifyid(struct net *net, int cmd, int id)
+static void rtnl_net_notifyid(struct net *net, int cmd, int id, u32 portid,
+                             struct nlmsghdr *nlh, gfp_t gfp)
 {
        struct net_fill_args fillargs = {
+               .portid = portid,
+               .seq = nlh ? nlh->nlmsg_seq : 0,
                .cmd = cmd,
                .nsid = id,
        };
        struct sk_buff *msg;
        int err = -ENOMEM;
 
-       msg = nlmsg_new(rtnl_net_get_size(), GFP_KERNEL);
+       msg = nlmsg_new(rtnl_net_get_size(), gfp);
        if (!msg)
                goto out;
 
@@ -1068,7 +1075,7 @@ static void rtnl_net_notifyid(struct net *net, int cmd, int id)
        if (err < 0)
                goto err_out;
 
-       rtnl_notify(msg, net, 0, RTNLGRP_NSID, NULL, 0);
+       rtnl_notify(msg, net, portid, RTNLGRP_NSID, nlh, gfp);
        return;
 
 err_out:
index c9bb00008528414486bb58ba4c26b003545c6ae4..f35c2e9984062ba4bed637eaeace4eb9e71dadc0 100644 (file)
@@ -96,7 +96,7 @@ void reqsk_fastopen_remove(struct sock *sk, struct request_sock *req,
 
        fastopenq = &inet_csk(lsk)->icsk_accept_queue.fastopenq;
 
-       tcp_sk(sk)->fastopen_rsk = NULL;
+       RCU_INIT_POINTER(tcp_sk(sk)->fastopen_rsk, NULL);
        spin_lock_bh(&fastopenq->lock);
        fastopenq->qlen--;
        tcp_rsk(req)->tfo_listener = false;
index 1ee6460f82756be6dd797b000c11f29826e2e710..c81cd80114d993ad1d924c3e388813474c211904 100644 (file)
@@ -1523,7 +1523,7 @@ static noinline_for_stack int nla_put_ifalias(struct sk_buff *skb,
 
 static int rtnl_fill_link_netnsid(struct sk_buff *skb,
                                  const struct net_device *dev,
-                                 struct net *src_net)
+                                 struct net *src_net, gfp_t gfp)
 {
        bool put_iflink = false;
 
@@ -1531,7 +1531,7 @@ static int rtnl_fill_link_netnsid(struct sk_buff *skb,
                struct net *link_net = dev->rtnl_link_ops->get_link_net(dev);
 
                if (!net_eq(dev_net(dev), link_net)) {
-                       int id = peernet2id_alloc(src_net, link_net);
+                       int id = peernet2id_alloc(src_net, link_net, gfp);
 
                        if (nla_put_s32(skb, IFLA_LINK_NETNSID, id))
                                return -EMSGSIZE;
@@ -1589,7 +1589,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb,
                            int type, u32 pid, u32 seq, u32 change,
                            unsigned int flags, u32 ext_filter_mask,
                            u32 event, int *new_nsid, int new_ifindex,
-                           int tgt_netnsid)
+                           int tgt_netnsid, gfp_t gfp)
 {
        struct ifinfomsg *ifm;
        struct nlmsghdr *nlh;
@@ -1681,7 +1681,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb,
                        goto nla_put_failure;
        }
 
-       if (rtnl_fill_link_netnsid(skb, dev, src_net))
+       if (rtnl_fill_link_netnsid(skb, dev, src_net, gfp))
                goto nla_put_failure;
 
        if (new_nsid &&
@@ -2001,7 +2001,7 @@ walk_entries:
                                               NETLINK_CB(cb->skb).portid,
                                               nlh->nlmsg_seq, 0, flags,
                                               ext_filter_mask, 0, NULL, 0,
-                                              netnsid);
+                                              netnsid, GFP_KERNEL);
 
                        if (err < 0) {
                                if (likely(skb->len))
@@ -2355,6 +2355,7 @@ static int do_set_master(struct net_device *dev, int ifindex,
                        err = ops->ndo_del_slave(upper_dev, dev);
                        if (err)
                                return err;
+                       netdev_update_lockdep_key(dev);
                } else {
                        return -EOPNOTSUPP;
                }
@@ -3359,7 +3360,7 @@ static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr *nlh,
        err = rtnl_fill_ifinfo(nskb, dev, net,
                               RTM_NEWLINK, NETLINK_CB(skb).portid,
                               nlh->nlmsg_seq, 0, 0, ext_filter_mask,
-                              0, NULL, 0, netnsid);
+                              0, NULL, 0, netnsid, GFP_KERNEL);
        if (err < 0) {
                /* -EMSGSIZE implies BUG in if_nlmsg_size */
                WARN_ON(err == -EMSGSIZE);
@@ -3471,7 +3472,7 @@ struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev,
 
        err = rtnl_fill_ifinfo(skb, dev, dev_net(dev),
                               type, 0, 0, change, 0, 0, event,
-                              new_nsid, new_ifindex, -1);
+                              new_nsid, new_ifindex, -1, flags);
        if (err < 0) {
                /* -EMSGSIZE implies BUG in if_nlmsg_size() */
                WARN_ON(err == -EMSGSIZE);
@@ -3916,7 +3917,7 @@ static int valid_fdb_dump_strict(const struct nlmsghdr *nlh,
        ndm = nlmsg_data(nlh);
        if (ndm->ndm_pad1  || ndm->ndm_pad2  || ndm->ndm_state ||
            ndm->ndm_flags || ndm->ndm_type) {
-               NL_SET_ERR_MSG(extack, "Invalid values in header for fbd dump request");
+               NL_SET_ERR_MSG(extack, "Invalid values in header for fdb dump request");
                return -EINVAL;
        }
 
index 01d65206f4fbfc207ccbac6a9a21cf13797fc35c..867e61df00dbc18efcb6251600a04f9ddb12bd92 100644 (file)
@@ -4415,7 +4415,7 @@ static void skb_set_err_queue(struct sk_buff *skb)
 int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
 {
        if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
-           (unsigned int)sk->sk_rcvbuf)
+           (unsigned int)READ_ONCE(sk->sk_rcvbuf))
                return -ENOMEM;
 
        skb_orphan(skb);
@@ -5120,7 +5120,7 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet)
        skb->ignore_df = 0;
        skb_dst_drop(skb);
        skb_ext_reset(skb);
-       nf_reset(skb);
+       nf_reset_ct(skb);
        nf_reset_trace(skb);
 
 #ifdef CONFIG_NET_SWITCHDEV
@@ -5477,12 +5477,14 @@ static void skb_mod_eth_type(struct sk_buff *skb, struct ethhdr *hdr,
  * @skb: buffer
  * @mpls_lse: MPLS label stack entry to push
  * @mpls_proto: ethertype of the new MPLS header (expects 0x8847 or 0x8848)
+ * @mac_len: length of the MAC header
  *
  * Expects skb->data at mac header.
  *
  * Returns 0 on success, -errno otherwise.
  */
-int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto)
+int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto,
+                 int mac_len)
 {
        struct mpls_shim_hdr *lse;
        int err;
@@ -5499,15 +5501,15 @@ int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto)
                return err;
 
        if (!skb->inner_protocol) {
-               skb_set_inner_network_header(skb, skb->mac_len);
+               skb_set_inner_network_header(skb, mac_len);
                skb_set_inner_protocol(skb, skb->protocol);
        }
 
        skb_push(skb, MPLS_HLEN);
        memmove(skb_mac_header(skb) - MPLS_HLEN, skb_mac_header(skb),
-               skb->mac_len);
+               mac_len);
        skb_reset_mac_header(skb);
-       skb_set_network_header(skb, skb->mac_len);
+       skb_set_network_header(skb, mac_len);
 
        lse = mpls_hdr(skb);
        lse->label_stack_entry = mpls_lse;
@@ -5526,29 +5528,30 @@ EXPORT_SYMBOL_GPL(skb_mpls_push);
  *
  * @skb: buffer
  * @next_proto: ethertype of header after popped MPLS header
+ * @mac_len: length of the MAC header
  *
  * Expects skb->data at mac header.
  *
  * Returns 0 on success, -errno otherwise.
  */
-int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto)
+int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto, int mac_len)
 {
        int err;
 
        if (unlikely(!eth_p_mpls(skb->protocol)))
-               return -EINVAL;
+               return 0;
 
-       err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN);
+       err = skb_ensure_writable(skb, mac_len + MPLS_HLEN);
        if (unlikely(err))
                return err;
 
        skb_postpull_rcsum(skb, mpls_hdr(skb), MPLS_HLEN);
        memmove(skb_mac_header(skb) + MPLS_HLEN, skb_mac_header(skb),
-               skb->mac_len);
+               mac_len);
 
        __skb_pull(skb, MPLS_HLEN);
        skb_reset_mac_header(skb);
-       skb_set_network_header(skb, skb->mac_len);
+       skb_set_network_header(skb, mac_len);
 
        if (skb->dev && skb->dev->type == ARPHRD_ETHER) {
                struct ethhdr *hdr;
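
The skb_mpls_push()/skb_mpls_pop() hunks above make the MAC header length an explicit parameter instead of trusting skb->mac_len, and turn a pop on a non-MPLS frame into a no-op rather than an error. The 32-bit label stack entry they shuffle around is packed per RFC 3032; a small sketch of building one, with made-up helper names:

/* Sketch: pack an MPLS label stack entry (RFC 3032):
 * label:20 | traffic class:3 | bottom-of-stack:1 | TTL:8, big endian on the wire. */
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t mpls_lse_build(uint32_t label, uint8_t tc, int bos, uint8_t ttl)
{
	uint32_t lse = ((label & 0xfffffu) << 12) |
		       ((uint32_t)(tc & 0x7) << 9) |
		       ((uint32_t)(bos ? 1 : 0) << 8) |
		       ttl;

	return htonl(lse);	/* network byte order, as stored in the header */
}

int main(void)
{
	uint32_t lse = mpls_lse_build(16, 0, 1, 64);

	printf("MPLS LSE on the wire: 0x%08x\n", ntohl(lse));
	return 0;
}
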
index 07863edbe6fc4842e47ebebf00bc21bc406d9264..ac78a570e43add1ce263841212b3759dbbfd8eae 100644 (file)
@@ -522,7 +522,7 @@ int __sk_receive_skb(struct sock *sk, struct sk_buff *skb,
                rc = sk_backlog_rcv(sk, skb);
 
                mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
-       } else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
+       } else if (sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf))) {
                bh_unlock_sock(sk);
                atomic_inc(&sk->sk_drops);
                goto discard_and_relse;
@@ -785,7 +785,8 @@ set_sndbuf:
                 */
                val = min_t(int, val, INT_MAX / 2);
                sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
-               sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF);
+               WRITE_ONCE(sk->sk_sndbuf,
+                          max_t(int, val * 2, SOCK_MIN_SNDBUF));
                /* Wake up sending tasks if we upped the value. */
                sk->sk_write_space(sk);
                break;
@@ -831,7 +832,8 @@ set_rcvbuf:
                 * returning the value we actually used in getsockopt
                 * is the most desirable behavior.
                 */
-               sk->sk_rcvbuf = max_t(int, val * 2, SOCK_MIN_RCVBUF);
+               WRITE_ONCE(sk->sk_rcvbuf,
+                          max_t(int, val * 2, SOCK_MIN_RCVBUF));
                break;
 
        case SO_RCVBUFFORCE:
@@ -974,7 +976,7 @@ set_rcvbuf:
                if (sock->ops->set_rcvlowat)
                        ret = sock->ops->set_rcvlowat(sk, val);
                else
-                       sk->sk_rcvlowat = val ? : 1;
+                       WRITE_ONCE(sk->sk_rcvlowat, val ? : 1);
                break;
 
        case SO_RCVTIMEO_OLD:
@@ -1125,7 +1127,7 @@ set_rcvbuf:
                break;
                }
        case SO_INCOMING_CPU:
-               sk->sk_incoming_cpu = val;
+               WRITE_ONCE(sk->sk_incoming_cpu, val);
                break;
 
        case SO_CNX_ADVICE:
@@ -1474,7 +1476,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
                break;
 
        case SO_INCOMING_CPU:
-               v.val = sk->sk_incoming_cpu;
+               v.val = READ_ONCE(sk->sk_incoming_cpu);
                break;
 
        case SO_MEMINFO:
@@ -1700,8 +1702,6 @@ static void __sk_destruct(struct rcu_head *head)
                sk_filter_uncharge(sk, filter);
                RCU_INIT_POINTER(sk->sk_filter, NULL);
        }
-       if (rcu_access_pointer(sk->sk_reuseport_cb))
-               reuseport_detach_sock(sk);
 
        sock_disable_timestamp(sk, SK_FLAGS_TIMESTAMP);
 
@@ -1728,7 +1728,14 @@ static void __sk_destruct(struct rcu_head *head)
 
 void sk_destruct(struct sock *sk)
 {
-       if (sock_flag(sk, SOCK_RCU_FREE))
+       bool use_call_rcu = sock_flag(sk, SOCK_RCU_FREE);
+
+       if (rcu_access_pointer(sk->sk_reuseport_cb)) {
+               reuseport_detach_sock(sk);
+               use_call_rcu = true;
+       }
+
+       if (use_call_rcu)
                call_rcu(&sk->sk_rcu, __sk_destruct);
        else
                __sk_destruct(&sk->sk_rcu);
@@ -2083,8 +2090,10 @@ EXPORT_SYMBOL(sock_i_ino);
 struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
                             gfp_t priority)
 {
-       if (force || refcount_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
+       if (force ||
+           refcount_read(&sk->sk_wmem_alloc) < READ_ONCE(sk->sk_sndbuf)) {
                struct sk_buff *skb = alloc_skb(size, priority);
+
                if (skb) {
                        skb_set_owner_w(skb, sk);
                        return skb;
@@ -2185,7 +2194,7 @@ static long sock_wait_for_wmem(struct sock *sk, long timeo)
                        break;
                set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
                prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
-               if (refcount_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
+               if (refcount_read(&sk->sk_wmem_alloc) < READ_ONCE(sk->sk_sndbuf))
                        break;
                if (sk->sk_shutdown & SEND_SHUTDOWN)
                        break;
@@ -2220,7 +2229,7 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
                if (sk->sk_shutdown & SEND_SHUTDOWN)
                        goto failure;
 
-               if (sk_wmem_alloc_get(sk) < sk->sk_sndbuf)
+               if (sk_wmem_alloc_get(sk) < READ_ONCE(sk->sk_sndbuf))
                        break;
 
                sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
@@ -2329,8 +2338,8 @@ static void sk_leave_memory_pressure(struct sock *sk)
        } else {
                unsigned long *memory_pressure = sk->sk_prot->memory_pressure;
 
-               if (memory_pressure && *memory_pressure)
-                       *memory_pressure = 0;
+               if (memory_pressure && READ_ONCE(*memory_pressure))
+                       WRITE_ONCE(*memory_pressure, 0);
        }
 }
 
@@ -2801,7 +2810,7 @@ static void sock_def_write_space(struct sock *sk)
        /* Do not wake up a writer until he can make "significant"
         * progress.  --DaveM
         */
-       if ((refcount_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
+       if ((refcount_read(&sk->sk_wmem_alloc) << 1) <= READ_ONCE(sk->sk_sndbuf)) {
                wq = rcu_dereference(sk->sk_wq);
                if (skwq_has_sleeper(wq))
                        wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT |
@@ -3199,13 +3208,13 @@ void sk_get_meminfo(const struct sock *sk, u32 *mem)
        memset(mem, 0, sizeof(*mem) * SK_MEMINFO_VARS);
 
        mem[SK_MEMINFO_RMEM_ALLOC] = sk_rmem_alloc_get(sk);
-       mem[SK_MEMINFO_RCVBUF] = sk->sk_rcvbuf;
+       mem[SK_MEMINFO_RCVBUF] = READ_ONCE(sk->sk_rcvbuf);
        mem[SK_MEMINFO_WMEM_ALLOC] = sk_wmem_alloc_get(sk);
-       mem[SK_MEMINFO_SNDBUF] = sk->sk_sndbuf;
+       mem[SK_MEMINFO_SNDBUF] = READ_ONCE(sk->sk_sndbuf);
        mem[SK_MEMINFO_FWD_ALLOC] = sk->sk_forward_alloc;
-       mem[SK_MEMINFO_WMEM_QUEUED] = sk->sk_wmem_queued;
+       mem[SK_MEMINFO_WMEM_QUEUED] = READ_ONCE(sk->sk_wmem_queued);
        mem[SK_MEMINFO_OPTMEM] = atomic_read(&sk->sk_omem_alloc);
-       mem[SK_MEMINFO_BACKLOG] = sk->sk_backlog.len;
+       mem[SK_MEMINFO_BACKLOG] = READ_ONCE(sk->sk_backlog.len);
        mem[SK_MEMINFO_DROPS] = atomic_read(&sk->sk_drops);
 }
 
@@ -3492,7 +3501,7 @@ static long sock_prot_memory_allocated(struct proto *proto)
        return proto->memory_allocated != NULL ? proto_memory_allocated(proto) : -1L;
 }
 
-static char *sock_prot_memory_pressure(struct proto *proto)
+static const char *sock_prot_memory_pressure(struct proto *proto)
 {
        return proto->memory_pressure != NULL ?
        proto_memory_pressure(proto) ? "yes" : "no" : "NI";
@@ -3591,7 +3600,7 @@ bool sk_busy_loop_end(void *p, unsigned long start_time)
 {
        struct sock *sk = p;
 
-       return !skb_queue_empty(&sk->sk_receive_queue) ||
+       return !skb_queue_empty_lockless(&sk->sk_receive_queue) ||
               sk_busy_loop_timeout(sk, start_time);
 }
 EXPORT_SYMBOL(sk_busy_loop_end);
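
Most of the socket hunks above (set_sndbuf/set_rcvbuf, sock_wmalloc(), sk_get_meminfo(), sock_def_write_space() and friends) annotate fields that lockless readers such as poll() and the diag/proc reporters can observe while another CPU updates them under the socket lock: plain accesses become READ_ONCE()/WRITE_ONCE() pairs so the compiler cannot tear, fuse, or re-load them. A rough userspace analogue using C11 relaxed atomics rather than the kernel macros:

/* Sketch: one writer updates a buffer-size field under its own lock while
 * readers sample it locklessly; relaxed atomics play the READ_ONCE/WRITE_ONCE
 * role of forcing a single, untorn load or store per access. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct toy_sock {
	pthread_mutex_t lock;	/* serializes writers only */
	_Atomic int sndbuf;	/* readers may look without the lock */
};

static void toy_set_sndbuf(struct toy_sock *sk, int val)
{
	pthread_mutex_lock(&sk->lock);
	atomic_store_explicit(&sk->sndbuf, val, memory_order_relaxed);
	pthread_mutex_unlock(&sk->lock);
}

static int toy_sndbuf_lockless(struct toy_sock *sk)
{
	return atomic_load_explicit(&sk->sndbuf, memory_order_relaxed);
}

int main(void)
{
	struct toy_sock sk = { .lock = PTHREAD_MUTEX_INITIALIZER, .sndbuf = 16384 };

	toy_set_sndbuf(&sk, 87380);
	printf("sndbuf seen by a lockless reader: %d\n", toy_sndbuf_lockless(&sk));
	return 0;
}
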
index b685bc82f8d03b39423c1e8f0c5e63d16e8921e3..0d8f782c25ccc031e5322beccb0242ee42b032b9 100644 (file)
@@ -117,7 +117,7 @@ int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
                                                    inet->inet_daddr,
                                                    inet->inet_sport,
                                                    inet->inet_dport);
-       inet->inet_id = dp->dccps_iss ^ jiffies;
+       inet->inet_id = prandom_u32();
 
        err = dccp_connect(sk);
        rt = NULL;
@@ -871,7 +871,7 @@ lookup:
 
        if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
                goto discard_and_relse;
-       nf_reset(skb);
+       nf_reset_ct(skb);
 
        return __sk_receive_skb(sk, skb, 1, dh->dccph_doff * 4, refcounted);
 
index 0ea75286abf46ad9da2e6c58d4facc91f731b8bf..3349ea81f9016fb785ec888fadf86faa4d859ed7 100644 (file)
@@ -1205,7 +1205,7 @@ static __poll_t dn_poll(struct file *file, struct socket *sock, poll_table  *wai
        struct dn_scp *scp = DN_SK(sk);
        __poll_t mask = datagram_poll(file, sock, wait);
 
-       if (!skb_queue_empty(&scp->other_receive_queue))
+       if (!skb_queue_empty_lockless(&scp->other_receive_queue))
                mask |= EPOLLRDBAND;
 
        return mask;
index 73002022c9d820df95b9df1841ce3724c4d41070..716d265ba8ca95e88afeb3788b316a25fe331c38 100644 (file)
@@ -46,7 +46,7 @@ static struct dsa_switch_tree *dsa_tree_alloc(int index)
        dst->index = index;
 
        INIT_LIST_HEAD(&dst->list);
-       list_add_tail(&dsa_tree_list, &dst->list);
+       list_add_tail(&dst->list, &dsa_tree_list);
 
        kref_init(&dst->refcount);
 
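The dsa_tree_alloc() hunk above fixes swapped arguments: list_add_tail(new, head) expects the new entry first and the list head second, so passing the global list head as the new entry corrupted the list instead of appending to it. A minimal userspace reimplementation showing the intended argument order (written from the standard circular-list shape, not copied from the kernel):

/* Sketch: circular doubly-linked list tail insert, same calling convention
 * as list_add_tail(new, head). */
#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define LIST_HEAD_INIT(name) { &(name), &(name) }

static void list_add_tail(struct list_head *new, struct list_head *head)
{
	new->prev = head->prev;
	new->next = head;
	head->prev->next = new;
	head->prev = new;
}

struct tree { int index; struct list_head list; };

int main(void)
{
	struct list_head tree_list = LIST_HEAD_INIT(tree_list);
	struct tree a = { .index = 0 }, b = { .index = 1 };
	struct list_head *pos;

	list_add_tail(&a.list, &tree_list);	/* new entry first, list head second */
	list_add_tail(&b.list, &tree_list);

	for (pos = tree_list.next; pos != &tree_list; pos = pos->next) {
		struct tree *t = (struct tree *)((char *)pos - offsetof(struct tree, list));

		printf("tree %d\n", t->index);
	}
	return 0;
}
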
index a8e52c9967f4c1a9c24a5c91f143e09b6136b569..3255dfc97f865b1c2e4634d9f4a48925b7a1804a 100644 (file)
@@ -310,8 +310,6 @@ static void dsa_master_reset_mtu(struct net_device *dev)
        rtnl_unlock();
 }
 
-static struct lock_class_key dsa_master_addr_list_lock_key;
-
 int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp)
 {
        int ret;
@@ -325,9 +323,6 @@ int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp)
        wmb();
 
        dev->dsa_ptr = cpu_dp;
-       lockdep_set_class(&dev->addr_list_lock,
-                         &dsa_master_addr_list_lock_key);
-
        ret = dsa_master_ethtool_setup(dev);
        if (ret)
                return ret;
index 75d58229a4bd356a18375a4e0c9e22e9e9a64082..028e65f4b5bafc93af03820055dab72fef4f708f 100644 (file)
@@ -1341,15 +1341,6 @@ static int dsa_slave_phy_setup(struct net_device *slave_dev)
        return ret;
 }
 
-static struct lock_class_key dsa_slave_netdev_xmit_lock_key;
-static void dsa_slave_set_lockdep_class_one(struct net_device *dev,
-                                           struct netdev_queue *txq,
-                                           void *_unused)
-{
-       lockdep_set_class(&txq->_xmit_lock,
-                         &dsa_slave_netdev_xmit_lock_key);
-}
-
 int dsa_slave_suspend(struct net_device *slave_dev)
 {
        struct dsa_port *dp = dsa_slave_to_port(slave_dev);
@@ -1433,9 +1424,6 @@ int dsa_slave_create(struct dsa_port *port)
        slave_dev->max_mtu = ETH_MAX_MTU;
        SET_NETDEV_DEVTYPE(slave_dev, &dsa_type);
 
-       netdev_for_each_tx_queue(slave_dev, dsa_slave_set_lockdep_class_one,
-                                NULL);
-
        SET_NETDEV_DEV(slave_dev, port->ds->dev);
        slave_dev->dev.of_node = port->dn;
        slave_dev->vlan_features = master->vlan_features;
index 9c9aff3e52cf9826998f42c30b837770aba4c745..63ef2a14c9343e74453b6e88b1ef56d4d39a3bc7 100644 (file)
@@ -156,7 +156,11 @@ static struct sk_buff
        /* Step 1: A timestampable frame was received.
         * Buffer it until we get its meta frame.
         */
-       if (is_link_local && sp->data->hwts_rx_en) {
+       if (is_link_local) {
+               if (!test_bit(SJA1105_HWTS_RX_EN, &sp->data->state))
+                       /* Do normal processing. */
+                       return skb;
+
                spin_lock(&sp->data->meta_lock);
                /* Was this a link-local frame instead of the meta
                 * that we were expecting?
@@ -187,6 +191,12 @@ static struct sk_buff
        } else if (is_meta) {
                struct sk_buff *stampable_skb;
 
+               /* Drop the meta frame if we're not in the right state
+                * to process it.
+                */
+               if (!test_bit(SJA1105_HWTS_RX_EN, &sp->data->state))
+                       return NULL;
+
                spin_lock(&sp->data->meta_lock);
 
                stampable_skb = sp->data->stampable_skb;
index 3297e7fa99458b13c40609588f187d366cf37411..c0b107cdd7153f7fbafdc546fea3511740e6933b 100644 (file)
@@ -58,13 +58,6 @@ static const struct header_ops lowpan_header_ops = {
        .create = lowpan_header_create,
 };
 
-static int lowpan_dev_init(struct net_device *ldev)
-{
-       netdev_lockdep_set_classes(ldev);
-
-       return 0;
-}
-
 static int lowpan_open(struct net_device *dev)
 {
        if (!open_count)
@@ -96,7 +89,6 @@ static int lowpan_get_iflink(const struct net_device *dev)
 }
 
 static const struct net_device_ops lowpan_netdev_ops = {
-       .ndo_init               = lowpan_dev_init,
        .ndo_start_xmit         = lowpan_xmit,
        .ndo_open               = lowpan_open,
        .ndo_stop               = lowpan_stop,
index 9a0fe0c2fa02c9707e6fc8c02529a48e84f7d680..4a8550c49202db13b17d5cf4ed1e44dd8852c212 100644 (file)
@@ -73,7 +73,7 @@ int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len
        reuseport_has_conns(sk, true);
        sk->sk_state = TCP_ESTABLISHED;
        sk_set_txhash(sk);
-       inet->inet_id = jiffies;
+       inet->inet_id = prandom_u32();
 
        sk_dst_set(sk, &rt->dst);
        err = 0;
index dde77f72e03edc8423e98c6f8a4a349aa4e306ec..71c78d223dfd32b1b95ee9e9aeffdaa95fd1a8c6 100644 (file)
@@ -1148,7 +1148,7 @@ void fib_modify_prefix_metric(struct in_ifaddr *ifa, u32 new_metric)
        if (!(dev->flags & IFF_UP) ||
            ifa->ifa_flags & (IFA_F_SECONDARY | IFA_F_NOPREFIXROUTE) ||
            ipv4_is_zeronet(prefix) ||
-           prefix == ifa->ifa_local || ifa->ifa_prefixlen == 32)
+           (prefix == ifa->ifa_local && ifa->ifa_prefixlen == 32))
                return;
 
        /* add the new */
index a9183543ca305e1723317a3dfd34af29087b605a..eb30fc1770def741950215f59a4e3ab0f91c6293 100644 (file)
@@ -906,7 +906,7 @@ static void inet_child_forget(struct sock *sk, struct request_sock *req,
        percpu_counter_inc(sk->sk_prot->orphan_count);
 
        if (sk->sk_protocol == IPPROTO_TCP && tcp_rsk(req)->tfo_listener) {
-               BUG_ON(tcp_sk(child)->fastopen_rsk != req);
+               BUG_ON(rcu_access_pointer(tcp_sk(child)->fastopen_rsk) != req);
                BUG_ON(sk != req->rsk_listener);
 
                /* Paranoid, to prevent race condition if
@@ -915,7 +915,7 @@ static void inet_child_forget(struct sock *sk, struct request_sock *req,
                 * Also to satisfy an assertion in
                 * tcp_v4_destroy_sock().
                 */
-               tcp_sk(child)->fastopen_rsk = NULL;
+               RCU_INIT_POINTER(tcp_sk(child)->fastopen_rsk, NULL);
        }
        inet_csk_destroy_sock(child);
 }
@@ -934,7 +934,7 @@ struct sock *inet_csk_reqsk_queue_add(struct sock *sk,
                req->sk = child;
                req->dl_next = NULL;
                if (queue->rskq_accept_head == NULL)
-                       queue->rskq_accept_head = req;
+                       WRITE_ONCE(queue->rskq_accept_head, req);
                else
                        queue->rskq_accept_tail->dl_next = req;
                queue->rskq_accept_tail = req;
index bbb005eb5218c2765567b1d14ef564d2332479cc..7dc79b973e6edcc64e668e14c71c732ca1187e8f 100644 (file)
@@ -193,7 +193,7 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
        if (ext & (1 << (INET_DIAG_MEMINFO - 1))) {
                struct inet_diag_meminfo minfo = {
                        .idiag_rmem = sk_rmem_alloc_get(sk),
-                       .idiag_wmem = sk->sk_wmem_queued,
+                       .idiag_wmem = READ_ONCE(sk->sk_wmem_queued),
                        .idiag_fmem = sk->sk_forward_alloc,
                        .idiag_tmem = sk_wmem_alloc_get(sk),
                };
index 97824864e40d13bac71510920ae125c6a89fef1d..83fb00153018f16678e71695134f7ae4d30ee0a3 100644 (file)
@@ -240,7 +240,7 @@ static inline int compute_score(struct sock *sk, struct net *net,
                        return -1;
 
                score = sk->sk_family == PF_INET ? 2 : 1;
-               if (sk->sk_incoming_cpu == raw_smp_processor_id())
+               if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
                        score++;
        }
        return score;
index a53a543fe0555e891fa61fee36a8f8a47007764d..10636fb6093e3cba09cfe6835077edb664e7244d 100644 (file)
@@ -509,9 +509,9 @@ static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev)
        key = &tun_info->key;
        if (!(tun_info->key.tun_flags & TUNNEL_ERSPAN_OPT))
                goto err_free_skb;
-       md = ip_tunnel_info_opts(tun_info);
-       if (!md)
+       if (tun_info->options_len < sizeof(*md))
                goto err_free_skb;
+       md = ip_tunnel_info_opts(tun_info);
 
        /* ERSPAN has fixed 8 byte GRE header */
        version = md->version;
@@ -1446,6 +1446,7 @@ static void erspan_setup(struct net_device *dev)
        struct ip_tunnel *t = netdev_priv(dev);
 
        ether_setup(dev);
+       dev->max_mtu = 0;
        dev->netdev_ops = &erspan_netdev_ops;
        dev->priv_flags &= ~IFF_TX_SKB_SHARING;
        dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
index 1e2392b7c64e2cac55fc401f36fa52bbe6b4c7f4..c59a78a267c37ab3a434c38c0ab236f2f5f2a0f1 100644 (file)
@@ -199,7 +199,7 @@ resubmit:
                                kfree_skb(skb);
                                return;
                        }
-                       nf_reset(skb);
+                       nf_reset_ct(skb);
                }
                ret = INDIRECT_CALL_2(ipprot->handler, tcp_v4_rcv, udp_rcv,
                                      skb);
index 28fca408812c5576fc4ea957c1c4dec97ec8faf3..3d8baaaf7086dba0c8fde178891734b2da7d7ec7 100644 (file)
@@ -645,11 +645,12 @@ void ip_fraglist_prepare(struct sk_buff *skb, struct ip_fraglist_iter *iter)
 EXPORT_SYMBOL(ip_fraglist_prepare);
 
 void ip_frag_init(struct sk_buff *skb, unsigned int hlen,
-                 unsigned int ll_rs, unsigned int mtu,
+                 unsigned int ll_rs, unsigned int mtu, bool DF,
                  struct ip_frag_state *state)
 {
        struct iphdr *iph = ip_hdr(skb);
 
+       state->DF = DF;
        state->hlen = hlen;
        state->ll_rs = ll_rs;
        state->mtu = mtu;
@@ -668,9 +669,6 @@ static void ip_frag_ipcb(struct sk_buff *from, struct sk_buff *to,
        /* Copy the flags to each fragment. */
        IPCB(to)->flags = IPCB(from)->flags;
 
-       if (IPCB(from)->flags & IPSKB_FRAG_PMTU)
-               state->iph->frag_off |= htons(IP_DF);
-
        /* ANK: dirty, but effective trick. Upgrade options only if
         * the segment to be fragmented was THE FIRST (otherwise,
         * options are already fixed) and make it ONCE
@@ -738,6 +736,8 @@ struct sk_buff *ip_frag_next(struct sk_buff *skb, struct ip_frag_state *state)
         */
        iph = ip_hdr(skb2);
        iph->frag_off = htons((state->offset >> 3));
+       if (state->DF)
+               iph->frag_off |= htons(IP_DF);
 
        /*
         *      Added AC : If we are fragmenting a fragment that's not the
@@ -771,6 +771,7 @@ int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
        struct rtable *rt = skb_rtable(skb);
        unsigned int mtu, hlen, ll_rs;
        struct ip_fraglist_iter iter;
+       ktime_t tstamp = skb->tstamp;
        struct ip_frag_state state;
        int err = 0;
 
@@ -846,6 +847,7 @@ int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
                                ip_fraglist_prepare(skb, &iter);
                        }
 
+                       skb->tstamp = tstamp;
                        err = output(net, sk, skb);
 
                        if (!err)
@@ -881,7 +883,8 @@ slow_path:
         *      Fragment the datagram.
         */
 
-       ip_frag_init(skb, hlen, ll_rs, mtu, &state);
+       ip_frag_init(skb, hlen, ll_rs, mtu, IPCB(skb)->flags & IPSKB_FRAG_PMTU,
+                    &state);
 
        /*
         *      Keep copying data until we run out.
@@ -900,6 +903,7 @@ slow_path:
                /*
                 *      Put this fragment into the sending queue.
                 */
+               skb2->tstamp = tstamp;
                err = output(net, sk, skb2);
                if (err)
                        goto fail;
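
The ip_frag_init()/ip_frag_next()/ip_do_fragment() hunks above move the IPSKB_FRAG_PMTU decision into the shared fragmentation state so every fragment, fast path or slow path, gets IP_DF set consistently, and they carry skb->tstamp across fragmentation. The 16-bit frag_off field being edited packs the flags and the 8-byte-granular offset together; a small sketch of that packing, with toy constant names:

/* Sketch: IPv4 frag_off packing: flags live in the top bits, the fragment
 * offset (in 8-byte units) in the low 13 bits. */
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

#define TOY_IP_DF	0x4000	/* don't fragment */
#define TOY_IP_MF	0x2000	/* more fragments follow */
#define TOY_IP_OFFMASK	0x1fff	/* offset in units of 8 bytes */

static uint16_t frag_off_build(unsigned int byte_offset, int df, int more)
{
	uint16_t v = (byte_offset >> 3) & TOY_IP_OFFMASK;

	if (df)
		v |= TOY_IP_DF;
	if (more)
		v |= TOY_IP_MF;
	return htons(v);	/* as written into iph->frag_off */
}

int main(void)
{
	/* second fragment of a locally fragmented PMTU packet: offset 1480, DF kept */
	printf("frag_off = 0x%04x\n", ntohs(frag_off_build(1480, 1, 1)));
	return 0;
}
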
index 313470f6bb148326b4afbc00d265b6a1e40d93bd..716d5472c022d84d1b9305274991bcf44cff8375 100644 (file)
@@ -1794,7 +1794,7 @@ static void ip_encap(struct net *net, struct sk_buff *skb,
        ip_send_check(iph);
 
        memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
-       nf_reset(skb);
+       nf_reset_ct(skb);
 }
 
 static inline int ipmr_forward_finish(struct net *net, struct sock *sk,
@@ -2140,7 +2140,7 @@ int ip_mr_input(struct sk_buff *skb)
 
                        mroute_sk = rcu_dereference(mrt->mroute_sk);
                        if (mroute_sk) {
-                               nf_reset(skb);
+                               nf_reset_ct(skb);
                                raw_rcv(mroute_sk, skb);
                                return 0;
                        }
index af3fbf76dbd3b1cb1c2d1df8f40009ad6ccea733..6cc5743c553a02fd82ee97fab94f36019b6a1f7c 100644 (file)
@@ -65,7 +65,7 @@ void nf_dup_ipv4(struct net *net, struct sk_buff *skb, unsigned int hooknum,
 
 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
        /* Avoid counting cloned packets towards the original connection. */
-       nf_reset(skb);
+       nf_reset_ct(skb);
        nf_ct_set(skb, NULL, IP_CT_UNTRACKED);
 #endif
        /*
index 80da5a66d5d7b6e53034de728cfda19630fa2399..3183413ebc6c223f90594325d02c919fade627e7 100644 (file)
@@ -332,7 +332,7 @@ int raw_rcv(struct sock *sk, struct sk_buff *skb)
                kfree_skb(skb);
                return NET_RX_DROP;
        }
-       nf_reset(skb);
+       nf_reset_ct(skb);
 
        skb_push(skb, skb->data - skb_network_header(skb));
 
index 7dcce724c78bd6bfbffb66671d594803031b13b6..621f83434b2494784ec2b92824a2743be8817e21 100644 (file)
@@ -916,16 +916,15 @@ void ip_rt_send_redirect(struct sk_buff *skb)
        if (peer->rate_tokens == 0 ||
            time_after(jiffies,
                       (peer->rate_last +
-                       (ip_rt_redirect_load << peer->rate_tokens)))) {
+                       (ip_rt_redirect_load << peer->n_redirects)))) {
                __be32 gw = rt_nexthop(rt, ip_hdr(skb)->daddr);
 
                icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, gw);
                peer->rate_last = jiffies;
-               ++peer->rate_tokens;
                ++peer->n_redirects;
 #ifdef CONFIG_IP_ROUTE_VERBOSE
                if (log_martians &&
-                   peer->rate_tokens == ip_rt_redirect_number)
+                   peer->n_redirects == ip_rt_redirect_number)
                        net_warn_ratelimited("host %pI4/if%d ignores redirects for %pI4 to %pI4\n",
                                             &ip_hdr(skb)->saddr, inet_iif(skb),
                                             &ip_hdr(skb)->daddr, &gw);
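
The ip_rt_send_redirect() hunk above keys the exponential backoff to peer->n_redirects instead of rate_tokens, which the surrounding token-bucket logic now uses for its own accounting, so the quiet period between redirects keeps doubling as intended. A toy sketch of that send-or-suppress decision; the names, base delay, and shift cap are all made up:

/* Sketch: exponentially backed-off event, doubling the quiet period after
 * every event that is actually sent. */
#include <stdbool.h>
#include <stdio.h>

#define BASE_DELAY	1	/* ticks before the first repeat */
#define MAX_SHIFT	9	/* cap the doubling so the shift stays defined */

struct peer { unsigned long rate_last; unsigned int n_redirects; };

static bool redirect_allowed(struct peer *p, unsigned long now)
{
	unsigned int shift = p->n_redirects < MAX_SHIFT ? p->n_redirects : MAX_SHIFT;

	if (now - p->rate_last < ((unsigned long)BASE_DELAY << shift))
		return false;
	p->rate_last = now;
	p->n_redirects++;
	return true;
}

int main(void)
{
	struct peer p = { 0, 0 };
	unsigned long tick;

	for (tick = 1; tick <= 40; tick++)
		if (redirect_allowed(&p, tick))
			printf("tick %lu: send redirect #%u\n", tick, p.n_redirects);
	return 0;
}
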
@@ -1483,7 +1482,7 @@ static bool rt_cache_route(struct fib_nh_common *nhc, struct rtable *rt)
        prev = cmpxchg(p, orig, rt);
        if (prev == orig) {
                if (orig) {
-                       dst_dev_put(&orig->dst);
+                       rt_add_uncached_list(orig);
                        dst_release(&orig->dst);
                }
        } else {
@@ -2471,14 +2470,17 @@ struct rtable *ip_route_output_key_hash_rcu(struct net *net, struct flowi4 *fl4,
        int orig_oif = fl4->flowi4_oif;
        unsigned int flags = 0;
        struct rtable *rth;
-       int err = -ENETUNREACH;
+       int err;
 
        if (fl4->saddr) {
-               rth = ERR_PTR(-EINVAL);
                if (ipv4_is_multicast(fl4->saddr) ||
                    ipv4_is_lbcast(fl4->saddr) ||
-                   ipv4_is_zeronet(fl4->saddr))
+                   ipv4_is_zeronet(fl4->saddr)) {
+                       rth = ERR_PTR(-EINVAL);
                        goto out;
+               }
+
+               rth = ERR_PTR(-ENETUNREACH);
 
                /* I removed check for oif == dev_out->oif here.
                   It was wrong for two reasons:
index 79c325a07ba5dc7cfad0a846d1f03bf1787f840b..d8876f0e9672718b4e02bc7aaaac30ecfd4903cb 100644 (file)
@@ -326,7 +326,7 @@ void tcp_enter_memory_pressure(struct sock *sk)
 {
        unsigned long val;
 
-       if (tcp_memory_pressure)
+       if (READ_ONCE(tcp_memory_pressure))
                return;
        val = jiffies;
 
@@ -341,7 +341,7 @@ void tcp_leave_memory_pressure(struct sock *sk)
 {
        unsigned long val;
 
-       if (!tcp_memory_pressure)
+       if (!READ_ONCE(tcp_memory_pressure))
                return;
        val = xchg(&tcp_memory_pressure, 0);
        if (val)
@@ -450,8 +450,8 @@ void tcp_init_sock(struct sock *sk)
 
        icsk->icsk_sync_mss = tcp_sync_mss;
 
-       sk->sk_sndbuf = sock_net(sk)->ipv4.sysctl_tcp_wmem[1];
-       sk->sk_rcvbuf = sock_net(sk)->ipv4.sysctl_tcp_rmem[1];
+       WRITE_ONCE(sk->sk_sndbuf, sock_net(sk)->ipv4.sysctl_tcp_wmem[1]);
+       WRITE_ONCE(sk->sk_rcvbuf, sock_net(sk)->ipv4.sysctl_tcp_rmem[1]);
 
        sk_sockets_allocated_inc(sk);
        sk->sk_route_forced_caps = NETIF_F_GSO;
@@ -477,7 +477,7 @@ static void tcp_tx_timestamp(struct sock *sk, u16 tsflags)
 static inline bool tcp_stream_is_readable(const struct tcp_sock *tp,
                                          int target, struct sock *sk)
 {
-       return (tp->rcv_nxt - tp->copied_seq >= target) ||
+       return (READ_ONCE(tp->rcv_nxt) - READ_ONCE(tp->copied_seq) >= target) ||
                (sk->sk_prot->stream_memory_read ?
                sk->sk_prot->stream_memory_read(sk) : false);
 }
@@ -543,10 +543,10 @@ __poll_t tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
 
        /* Connected or passive Fast Open socket? */
        if (state != TCP_SYN_SENT &&
-           (state != TCP_SYN_RECV || tp->fastopen_rsk)) {
+           (state != TCP_SYN_RECV || rcu_access_pointer(tp->fastopen_rsk))) {
                int target = sock_rcvlowat(sk, 0, INT_MAX);
 
-               if (tp->urg_seq == tp->copied_seq &&
+               if (READ_ONCE(tp->urg_seq) == READ_ONCE(tp->copied_seq) &&
                    !sock_flag(sk, SOCK_URGINLINE) &&
                    tp->urg_data)
                        target++;
@@ -584,7 +584,7 @@ __poll_t tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
        }
        /* This barrier is coupled with smp_wmb() in tcp_reset() */
        smp_rmb();
-       if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
+       if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue))
                mask |= EPOLLERR;
 
        return mask;
@@ -607,7 +607,8 @@ int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
                unlock_sock_fast(sk, slow);
                break;
        case SIOCATMARK:
-               answ = tp->urg_data && tp->urg_seq == tp->copied_seq;
+               answ = tp->urg_data &&
+                      READ_ONCE(tp->urg_seq) == READ_ONCE(tp->copied_seq);
                break;
        case SIOCOUTQ:
                if (sk->sk_state == TCP_LISTEN)
@@ -616,7 +617,7 @@ int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
                if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
                        answ = 0;
                else
-                       answ = tp->write_seq - tp->snd_una;
+                       answ = READ_ONCE(tp->write_seq) - tp->snd_una;
                break;
        case SIOCOUTQNSD:
                if (sk->sk_state == TCP_LISTEN)
@@ -625,7 +626,8 @@ int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
                if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
                        answ = 0;
                else
-                       answ = tp->write_seq - tp->snd_nxt;
+                       answ = READ_ONCE(tp->write_seq) -
+                              READ_ONCE(tp->snd_nxt);
                break;
        default:
                return -ENOIOCTLCMD;
@@ -657,7 +659,7 @@ static void skb_entail(struct sock *sk, struct sk_buff *skb)
        tcb->sacked  = 0;
        __skb_header_release(skb);
        tcp_add_write_queue_tail(sk, skb);
-       sk->sk_wmem_queued += skb->truesize;
+       sk_wmem_queued_add(sk, skb->truesize);
        sk_mem_charge(sk, skb->truesize);
        if (tp->nonagle & TCP_NAGLE_PUSH)
                tp->nonagle &= ~TCP_NAGLE_PUSH;
@@ -1032,10 +1034,10 @@ new_segment:
                skb->len += copy;
                skb->data_len += copy;
                skb->truesize += copy;
-               sk->sk_wmem_queued += copy;
+               sk_wmem_queued_add(sk, copy);
                sk_mem_charge(sk, copy);
                skb->ip_summed = CHECKSUM_PARTIAL;
-               tp->write_seq += copy;
+               WRITE_ONCE(tp->write_seq, tp->write_seq + copy);
                TCP_SKB_CB(skb)->end_seq += copy;
                tcp_skb_pcount_set(skb, 0);
 
@@ -1362,7 +1364,7 @@ new_segment:
                if (!copied)
                        TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH;
 
-               tp->write_seq += copy;
+               WRITE_ONCE(tp->write_seq, tp->write_seq + copy);
                TCP_SKB_CB(skb)->end_seq += copy;
                tcp_skb_pcount_set(skb, 0);
 
@@ -1668,9 +1670,9 @@ int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
                sk_eat_skb(sk, skb);
                if (!desc->count)
                        break;
-               tp->copied_seq = seq;
+               WRITE_ONCE(tp->copied_seq, seq);
        }
-       tp->copied_seq = seq;
+       WRITE_ONCE(tp->copied_seq, seq);
 
        tcp_rcv_space_adjust(sk);
 
@@ -1699,7 +1701,7 @@ int tcp_set_rcvlowat(struct sock *sk, int val)
        else
                cap = sock_net(sk)->ipv4.sysctl_tcp_rmem[2] >> 1;
        val = min(val, cap);
-       sk->sk_rcvlowat = val ? : 1;
+       WRITE_ONCE(sk->sk_rcvlowat, val ? : 1);
 
        /* Check if we need to signal EPOLLIN right now */
        tcp_data_ready(sk);
@@ -1709,7 +1711,7 @@ int tcp_set_rcvlowat(struct sock *sk, int val)
 
        val <<= 1;
        if (val > sk->sk_rcvbuf) {
-               sk->sk_rcvbuf = val;
+               WRITE_ONCE(sk->sk_rcvbuf, val);
                tcp_sk(sk)->window_clamp = tcp_win_from_space(sk, val);
        }
        return 0;
@@ -1798,13 +1800,11 @@ static int tcp_zerocopy_receive(struct sock *sk,
                }
                if (skb_frag_size(frags) != PAGE_SIZE || skb_frag_off(frags)) {
                        int remaining = zc->recv_skip_hint;
-                       int size = skb_frag_size(frags);
 
-                       while (remaining && (size != PAGE_SIZE ||
+                       while (remaining && (skb_frag_size(frags) != PAGE_SIZE ||
                                             skb_frag_off(frags))) {
-                               remaining -= size;
+                               remaining -= skb_frag_size(frags);
                                frags++;
-                               size = skb_frag_size(frags);
                        }
                        zc->recv_skip_hint -= remaining;
                        break;
@@ -1821,7 +1821,7 @@ static int tcp_zerocopy_receive(struct sock *sk,
 out:
        up_read(&current->mm->mmap_sem);
        if (length) {
-               tp->copied_seq = seq;
+               WRITE_ONCE(tp->copied_seq, seq);
                tcp_rcv_space_adjust(sk);
 
                /* Clean up data we have read: This will do ACK frames. */
@@ -1964,7 +1964,7 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
        if (unlikely(flags & MSG_ERRQUEUE))
                return inet_recv_error(sk, msg, len, addr_len);
 
-       if (sk_can_busy_loop(sk) && skb_queue_empty(&sk->sk_receive_queue) &&
+       if (sk_can_busy_loop(sk) && skb_queue_empty_lockless(&sk->sk_receive_queue) &&
            (sk->sk_state == TCP_ESTABLISHED))
                sk_busy_loop(sk, nonblock);
 
@@ -2119,7 +2119,7 @@ found_ok_skb:
                        if (urg_offset < used) {
                                if (!urg_offset) {
                                        if (!sock_flag(sk, SOCK_URGINLINE)) {
-                                               ++*seq;
+                                               WRITE_ONCE(*seq, *seq + 1);
                                                urg_hole++;
                                                offset++;
                                                used--;
@@ -2141,7 +2141,7 @@ found_ok_skb:
                        }
                }
 
-               *seq += used;
+               WRITE_ONCE(*seq, *seq + used);
                copied += used;
                len -= used;
 
@@ -2168,7 +2168,7 @@ skip_copy:
 
 found_fin_ok:
                /* Process the FIN. */
-               ++*seq;
+               WRITE_ONCE(*seq, *seq + 1);
                if (!(flags & MSG_PEEK))
                        sk_eat_skb(sk, skb);
                break;
@@ -2489,7 +2489,10 @@ adjudge_to_death:
        }
 
        if (sk->sk_state == TCP_CLOSE) {
-               struct request_sock *req = tcp_sk(sk)->fastopen_rsk;
+               struct request_sock *req;
+
+               req = rcu_dereference_protected(tcp_sk(sk)->fastopen_rsk,
+                                               lockdep_sock_is_held(sk));
                /* We could get here with a non-NULL req if the socket is
                 * aborted (e.g., closed with unread data) before 3WHS
                 * finishes.
@@ -2561,6 +2564,7 @@ int tcp_disconnect(struct sock *sk, int flags)
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        int old_state = sk->sk_state;
+       u32 seq;
 
        if (old_state != TCP_CLOSE)
                tcp_set_state(sk, TCP_CLOSE);
@@ -2587,7 +2591,7 @@ int tcp_disconnect(struct sock *sk, int flags)
                __kfree_skb(sk->sk_rx_skb_cache);
                sk->sk_rx_skb_cache = NULL;
        }
-       tp->copied_seq = tp->rcv_nxt;
+       WRITE_ONCE(tp->copied_seq, tp->rcv_nxt);
        tp->urg_data = 0;
        tcp_write_queue_purge(sk);
        tcp_fastopen_active_disable_ofo_check(sk);
@@ -2603,9 +2607,12 @@ int tcp_disconnect(struct sock *sk, int flags)
        tp->srtt_us = 0;
        tp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
        tp->rcv_rtt_last_tsecr = 0;
-       tp->write_seq += tp->max_window + 2;
-       if (tp->write_seq == 0)
-               tp->write_seq = 1;
+
+       seq = tp->write_seq + tp->max_window + 2;
+       if (!seq)
+               seq = 1;
+       WRITE_ONCE(tp->write_seq, seq);
+
        icsk->icsk_backoff = 0;
        tp->snd_cwnd = 2;
        icsk->icsk_probes_out = 0;
@@ -2932,9 +2939,9 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
                if (sk->sk_state != TCP_CLOSE)
                        err = -EPERM;
                else if (tp->repair_queue == TCP_SEND_QUEUE)
-                       tp->write_seq = val;
+                       WRITE_ONCE(tp->write_seq, val);
                else if (tp->repair_queue == TCP_RECV_QUEUE)
-                       tp->rcv_nxt = val;
+                       WRITE_ONCE(tp->rcv_nxt, val);
                else
                        err = -EINVAL;
                break;
@@ -3833,7 +3840,13 @@ EXPORT_SYMBOL(tcp_md5_hash_key);
 
 void tcp_done(struct sock *sk)
 {
-       struct request_sock *req = tcp_sk(sk)->fastopen_rsk;
+       struct request_sock *req;
+
+       /* We might be called with a new socket, after
+        * inet_csk_prepare_forced_close() has been called
+        * so we can not use lockdep_sock_is_held(sk)
+        */
+       req = rcu_dereference_protected(tcp_sk(sk)->fastopen_rsk, 1);
 
        if (sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV)
                TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
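
The hunks above around tcp_close() and tcp_done(), together with the matching ones in tcp_fastopen_create_child(), tcp_rcv_state_process(), and the IPv4 error handler further below, turn tp->fastopen_rsk into an RCU-managed pointer: writers publish with rcu_assign_pointer(), lock holders read with rcu_dereference_protected(), and paths that only test it use rcu_access_pointer(). As a rough userspace analogue with C11 atomics rather than the kernel's RCU machinery, publishing is a release store and the lockless read is an acquire load; the struct and helpers here are invented:

/* Sketch: publish a fully-initialized object through a shared pointer so a
 * lockless reader never observes a half-built struct. */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct request { int synack_retries; int listener_fd; };

static _Atomic(struct request *) fastopen_req;

static void publish_request(int fd)
{
	struct request *req = malloc(sizeof(*req));

	if (!req)
		return;
	req->synack_retries = 0;
	req->listener_fd = fd;
	/* release: all stores above are visible before the pointer is */
	atomic_store_explicit(&fastopen_req, req, memory_order_release);
}

static void peek_request(void)
{
	/* acquire pairs with the release store, playing rcu_dereference()'s role */
	struct request *req = atomic_load_explicit(&fastopen_req, memory_order_acquire);

	if (req)
		printf("pending request on listener fd %d\n", req->listener_fd);
	else
		printf("no pending fast open request\n");
}

int main(void)
{
	peek_request();
	publish_request(3);
	peek_request();
	return 0;
}
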
index 81a8221d650a94be53d17354c60ddd0c655eaccf..549506162ddeca22f6dd87dfe1c5c13cea6e2b69 100644 (file)
@@ -26,8 +26,9 @@ static void tcp_diag_get_info(struct sock *sk, struct inet_diag_msg *r,
        } else if (sk->sk_type == SOCK_STREAM) {
                const struct tcp_sock *tp = tcp_sk(sk);
 
-               r->idiag_rqueue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
-               r->idiag_wqueue = tp->write_seq - tp->snd_una;
+               r->idiag_rqueue = max_t(int, READ_ONCE(tp->rcv_nxt) -
+                                            READ_ONCE(tp->copied_seq), 0);
+               r->idiag_wqueue = READ_ONCE(tp->write_seq) - tp->snd_una;
        }
        if (info)
                tcp_get_info(sk, info);
index 3fd451271a7034aab65f02f2bc331254b49f2153..a915ade0c81803a3b190e8c0513220b4e67c35e4 100644 (file)
@@ -253,7 +253,7 @@ static struct sock *tcp_fastopen_create_child(struct sock *sk,
         */
        tp = tcp_sk(child);
 
-       tp->fastopen_rsk = req;
+       rcu_assign_pointer(tp->fastopen_rsk, req);
        tcp_rsk(req)->tfo_listener = true;
 
        /* RFC1323: The window in SYN & SYN/ACK segments is never
index 3578357abe30e92f818e5b9acf3317be1d997af5..a2e52ad7cdab3e66a469a8ca850848988b3888d7 100644 (file)
@@ -359,7 +359,8 @@ static void tcp_sndbuf_expand(struct sock *sk)
        sndmem *= nr_segs * per_mss;
 
        if (sk->sk_sndbuf < sndmem)
-               sk->sk_sndbuf = min(sndmem, sock_net(sk)->ipv4.sysctl_tcp_wmem[2]);
+               WRITE_ONCE(sk->sk_sndbuf,
+                          min(sndmem, sock_net(sk)->ipv4.sysctl_tcp_wmem[2]));
 }
 
 /* 2. Tuning advertised window (window_clamp, rcv_ssthresh)
@@ -483,8 +484,9 @@ static void tcp_clamp_window(struct sock *sk)
            !(sk->sk_userlocks & SOCK_RCVBUF_LOCK) &&
            !tcp_under_memory_pressure(sk) &&
            sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)) {
-               sk->sk_rcvbuf = min(atomic_read(&sk->sk_rmem_alloc),
-                                   net->ipv4.sysctl_tcp_rmem[2]);
+               WRITE_ONCE(sk->sk_rcvbuf,
+                          min(atomic_read(&sk->sk_rmem_alloc),
+                              net->ipv4.sysctl_tcp_rmem[2]));
        }
        if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf)
                tp->rcv_ssthresh = min(tp->window_clamp, 2U * tp->advmss);
@@ -648,7 +650,7 @@ void tcp_rcv_space_adjust(struct sock *sk)
                rcvbuf = min_t(u64, rcvwin * rcvmem,
                               sock_net(sk)->ipv4.sysctl_tcp_rmem[2]);
                if (rcvbuf > sk->sk_rcvbuf) {
-                       sk->sk_rcvbuf = rcvbuf;
+                       WRITE_ONCE(sk->sk_rcvbuf, rcvbuf);
 
                        /* Make the window clamp follow along.  */
                        tp->window_clamp = tcp_win_from_space(sk, rcvbuf);
@@ -2666,7 +2668,7 @@ static void tcp_process_loss(struct sock *sk, int flag, int num_dupack,
        struct tcp_sock *tp = tcp_sk(sk);
        bool recovered = !before(tp->snd_una, tp->high_seq);
 
-       if ((flag & FLAG_SND_UNA_ADVANCED || tp->fastopen_rsk) &&
+       if ((flag & FLAG_SND_UNA_ADVANCED || rcu_access_pointer(tp->fastopen_rsk)) &&
            tcp_try_undo_loss(sk, false))
                return;
 
@@ -2990,7 +2992,7 @@ void tcp_rearm_rto(struct sock *sk)
        /* If the retrans timer is currently being used by Fast Open
         * for SYN-ACK retrans purpose, stay put.
         */
-       if (tp->fastopen_rsk)
+       if (rcu_access_pointer(tp->fastopen_rsk))
                return;
 
        if (!tp->packets_out) {
@@ -3362,7 +3364,7 @@ static void tcp_rcv_nxt_update(struct tcp_sock *tp, u32 seq)
 
        sock_owned_by_me((struct sock *)tp);
        tp->bytes_received += delta;
-       tp->rcv_nxt = seq;
+       WRITE_ONCE(tp->rcv_nxt, seq);
 }
 
 /* Update our send window.
@@ -5356,7 +5358,7 @@ static void tcp_check_urg(struct sock *sk, const struct tcphdr *th)
        }
 
        tp->urg_data = TCP_URG_NOTYET;
-       tp->urg_seq = ptr;
+       WRITE_ONCE(tp->urg_seq, ptr);
 
        /* Disable header prediction. */
        tp->pred_flags = 0;
@@ -5932,7 +5934,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
                /* Ok.. it's good. Set up sequence numbers and
                 * move to established.
                 */
-               tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;
+               WRITE_ONCE(tp->rcv_nxt, TCP_SKB_CB(skb)->seq + 1);
                tp->rcv_wup = TCP_SKB_CB(skb)->seq + 1;
 
                /* RFC1323: The window in SYN & SYN/ACK segments is
@@ -5961,7 +5963,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
                /* Remember, tcp_poll() does not lock socket!
                 * Change state from SYN-SENT only after copied_seq
                 * is initialized. */
-               tp->copied_seq = tp->rcv_nxt;
+               WRITE_ONCE(tp->copied_seq, tp->rcv_nxt);
 
                smc_check_reset_syn(tp);
 
@@ -6035,8 +6037,8 @@ discard:
                        tp->tcp_header_len = sizeof(struct tcphdr);
                }
 
-               tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;
-               tp->copied_seq = tp->rcv_nxt;
+               WRITE_ONCE(tp->rcv_nxt, TCP_SKB_CB(skb)->seq + 1);
+               WRITE_ONCE(tp->copied_seq, tp->rcv_nxt);
                tp->rcv_wup = TCP_SKB_CB(skb)->seq + 1;
 
                /* RFC1323: The window in SYN & SYN/ACK segments is
@@ -6087,6 +6089,8 @@ reset_and_undo:
 
 static void tcp_rcv_synrecv_state_fastopen(struct sock *sk)
 {
+       struct request_sock *req;
+
        tcp_try_undo_loss(sk, false);
 
        /* Reset rtx states to prevent spurious retransmits_timed_out() */
@@ -6096,7 +6100,9 @@ static void tcp_rcv_synrecv_state_fastopen(struct sock *sk)
        /* Once we leave TCP_SYN_RECV or TCP_FIN_WAIT_1,
         * we no longer need req so release it.
         */
-       reqsk_fastopen_remove(sk, tcp_sk(sk)->fastopen_rsk, false);
+       req = rcu_dereference_protected(tcp_sk(sk)->fastopen_rsk,
+                                       lockdep_sock_is_held(sk));
+       reqsk_fastopen_remove(sk, req, false);
 
        /* Re-arm the timer because data may have been sent out.
         * This is similar to the regular data transmission case
@@ -6171,7 +6177,8 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
 
        tcp_mstamp_refresh(tp);
        tp->rx_opt.saw_tstamp = 0;
-       req = tp->fastopen_rsk;
+       req = rcu_dereference_protected(tp->fastopen_rsk,
+                                       lockdep_sock_is_held(sk));
        if (req) {
                bool req_stolen;
 
@@ -6211,7 +6218,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
                        tcp_try_undo_spurious_syn(sk);
                        tp->retrans_stamp = 0;
                        tcp_init_transfer(sk, BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB);
-                       tp->copied_seq = tp->rcv_nxt;
+                       WRITE_ONCE(tp->copied_seq, tp->rcv_nxt);
                }
                smp_mb();
                tcp_set_state(sk, TCP_ESTABLISHED);
index 2ee45e3755e92e60b5e1810e2f68205221b8308d..67b2dc7a17274bd4cdadd01c45a9afdc66dc01d3 100644 (file)
@@ -164,9 +164,11 @@ int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
                 * without appearing to create any others.
                 */
                if (likely(!tp->repair)) {
-                       tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
-                       if (tp->write_seq == 0)
-                               tp->write_seq = 1;
+                       u32 seq = tcptw->tw_snd_nxt + 65535 + 2;
+
+                       if (!seq)
+                               seq = 1;
+                       WRITE_ONCE(tp->write_seq, seq);
                        tp->rx_opt.ts_recent       = tcptw->tw_ts_recent;
                        tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
                }
@@ -253,7 +255,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
                tp->rx_opt.ts_recent       = 0;
                tp->rx_opt.ts_recent_stamp = 0;
                if (likely(!tp->repair))
-                       tp->write_seq      = 0;
+                       WRITE_ONCE(tp->write_seq, 0);
        }
 
        inet->inet_dport = usin->sin_port;
@@ -291,16 +293,17 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 
        if (likely(!tp->repair)) {
                if (!tp->write_seq)
-                       tp->write_seq = secure_tcp_seq(inet->inet_saddr,
-                                                      inet->inet_daddr,
-                                                      inet->inet_sport,
-                                                      usin->sin_port);
+                       WRITE_ONCE(tp->write_seq,
+                                  secure_tcp_seq(inet->inet_saddr,
+                                                 inet->inet_daddr,
+                                                 inet->inet_sport,
+                                                 usin->sin_port));
                tp->tsoffset = secure_tcp_ts_off(sock_net(sk),
                                                 inet->inet_saddr,
                                                 inet->inet_daddr);
        }
 
-       inet->inet_id = tp->write_seq ^ jiffies;
+       inet->inet_id = prandom_u32();
 
        if (tcp_fastopen_defer_connect(sk, &err))
                return err;
@@ -478,7 +481,7 @@ int tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
        icsk = inet_csk(sk);
        tp = tcp_sk(sk);
        /* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
-       fastopen = tp->fastopen_rsk;
+       fastopen = rcu_dereference(tp->fastopen_rsk);
        snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
        if (sk->sk_state != TCP_LISTEN &&
            !between(seq, snd_una, tp->snd_nxt)) {
@@ -1447,7 +1450,7 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
        inet_csk(newsk)->icsk_ext_hdr_len = 0;
        if (inet_opt)
                inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
-       newinet->inet_id = newtp->write_seq ^ jiffies;
+       newinet->inet_id = prandom_u32();
 
        if (!dst) {
                dst = inet_csk_route_child_sock(sk, newsk, req);
@@ -1644,7 +1647,7 @@ int tcp_v4_early_demux(struct sk_buff *skb)
 
 bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
 {
-       u32 limit = sk->sk_rcvbuf + sk->sk_sndbuf;
+       u32 limit = READ_ONCE(sk->sk_rcvbuf) + READ_ONCE(sk->sk_sndbuf);
        struct skb_shared_info *shinfo;
        const struct tcphdr *th;
        struct tcphdr *thtail;
@@ -1916,7 +1919,7 @@ process:
        if (tcp_v4_inbound_md5_hash(sk, skb))
                goto discard_and_relse;
 
-       nf_reset(skb);
+       nf_reset_ct(skb);
 
        if (tcp_filter(sk, skb))
                goto discard_and_relse;
@@ -2121,7 +2124,7 @@ void tcp_v4_destroy_sock(struct sock *sk)
        if (inet_csk(sk)->icsk_bind_hash)
                inet_put_port(sk);
 
-       BUG_ON(tp->fastopen_rsk);
+       BUG_ON(rcu_access_pointer(tp->fastopen_rsk));
 
        /* If socket is aborted during connect operation */
        tcp_free_fastopen_req(tp);
@@ -2455,12 +2458,13 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
                /* Because we don't lock the socket,
                 * we might find a transient negative value.
                 */
-               rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
+               rx_queue = max_t(int, READ_ONCE(tp->rcv_nxt) -
+                                     READ_ONCE(tp->copied_seq), 0);
 
        seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
                        "%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
                i, src, srcp, dest, destp, state,
-               tp->write_seq - tp->snd_una,
+               READ_ONCE(tp->write_seq) - tp->snd_una,
                rx_queue,
                timer_active,
                jiffies_delta_to_clock_t(timer_expires - jiffies),
@@ -2677,7 +2681,7 @@ static int __net_init tcp_sk_init(struct net *net)
        net->ipv4.tcp_death_row.sysctl_max_tw_buckets = cnt / 2;
        net->ipv4.tcp_death_row.hashinfo = &tcp_hashinfo;
 
-       net->ipv4.sysctl_max_syn_backlog = max(128, cnt / 256);
+       net->ipv4.sysctl_max_syn_backlog = max(128, cnt / 128);
        net->ipv4.sysctl_tcp_sack = 1;
        net->ipv4.sysctl_tcp_window_scaling = 1;
        net->ipv4.sysctl_tcp_timestamps = 1;
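
The tcp_ipv4.c hunks above show the pattern used throughout this series: fields such as write_seq, copied_seq and rcv_nxt that can be observed by lockless readers (the /proc/net/tcp output in get_tcp4_sock(), for instance) are now stored with WRITE_ONCE() and loaded with READ_ONCE(), so the compiler cannot tear or reorder the accesses. A minimal userspace sketch of the same idea, using C11 relaxed atomics as a stand-in for the kernel macros; the struct and function names are invented for illustration:

#include <stdatomic.h>
#include <stdio.h>

struct fake_sock {
	_Atomic unsigned int write_seq;	/* written by the owner, read locklessly */
	unsigned int snd_una;		/* only touched by the owner             */
};

static void owner_sends(struct fake_sock *sk, unsigned int len)
{
	unsigned int seq = atomic_load_explicit(&sk->write_seq, memory_order_relaxed);

	/* relaxed store plays the role of WRITE_ONCE(tp->write_seq, ...) */
	atomic_store_explicit(&sk->write_seq, seq + len, memory_order_relaxed);
}

static void stats_reader(struct fake_sock *sk)
{
	/* relaxed load plays the role of READ_ONCE(tp->write_seq) */
	unsigned int wq = atomic_load_explicit(&sk->write_seq, memory_order_relaxed);

	/* like get_tcp4_sock(): the value may be stale, but it is never torn */
	printf("unsent bytes: %u\n", wq - sk->snd_una);
}

int main(void)
{
	struct fake_sock sk = { .write_seq = 100, .snd_una = 100 };

	owner_sends(&sk, 1400);
	stats_reader(&sk);
	return 0;
}
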
index bb140a5db8c066e57f1018fd47bccd4628def642..c802bc80c4006f82c2e9189ef1fc11b8f321e70d 100644 (file)
@@ -462,6 +462,7 @@ struct sock *tcp_create_openreq_child(const struct sock *sk,
        struct tcp_request_sock *treq = tcp_rsk(req);
        struct inet_connection_sock *newicsk;
        struct tcp_sock *oldtp, *newtp;
+       u32 seq;
 
        if (!newsk)
                return NULL;
@@ -475,12 +476,16 @@ struct sock *tcp_create_openreq_child(const struct sock *sk,
        /* Now setup tcp_sock */
        newtp->pred_flags = 0;
 
-       newtp->rcv_wup = newtp->copied_seq =
-       newtp->rcv_nxt = treq->rcv_isn + 1;
+       seq = treq->rcv_isn + 1;
+       newtp->rcv_wup = seq;
+       WRITE_ONCE(newtp->copied_seq, seq);
+       WRITE_ONCE(newtp->rcv_nxt, seq);
        newtp->segs_in = 1;
 
-       newtp->snd_sml = newtp->snd_una =
-       newtp->snd_nxt = newtp->snd_up = treq->snt_isn + 1;
+       seq = treq->snt_isn + 1;
+       newtp->snd_sml = newtp->snd_una = seq;
+       WRITE_ONCE(newtp->snd_nxt, seq);
+       newtp->snd_up = seq;
 
        INIT_LIST_HEAD(&newtp->tsq_node);
        INIT_LIST_HEAD(&newtp->tsorted_sent_queue);
@@ -495,7 +500,7 @@ struct sock *tcp_create_openreq_child(const struct sock *sk,
        newtp->total_retrans = req->num_retrans;
 
        tcp_init_xmit_timers(newsk);
-       newtp->write_seq = newtp->pushed_seq = treq->snt_isn + 1;
+       WRITE_ONCE(newtp->write_seq, newtp->pushed_seq = treq->snt_isn + 1);
 
        if (sock_flag(newsk, SOCK_KEEPOPEN))
                inet_csk_reset_keepalive_timer(newsk,
@@ -541,7 +546,7 @@ struct sock *tcp_create_openreq_child(const struct sock *sk,
        newtp->rx_opt.mss_clamp = req->mss;
        tcp_ecn_openreq_child(newtp, req);
        newtp->fastopen_req = NULL;
-       newtp->fastopen_rsk = NULL;
+       RCU_INIT_POINTER(newtp->fastopen_rsk, NULL);
 
        __TCP_INC_STATS(sock_net(sk), TCP_MIB_PASSIVEOPENS);
 
index fec6d67bfd146dc78f0f25173fd71b8b8cc752fe..0488607c5cd3615633af207f0bb41bea0c0176ce 100644 (file)
@@ -67,7 +67,7 @@ static void tcp_event_new_data_sent(struct sock *sk, struct sk_buff *skb)
        struct tcp_sock *tp = tcp_sk(sk);
        unsigned int prior_packets = tp->packets_out;
 
-       tp->snd_nxt = TCP_SKB_CB(skb)->end_seq;
+       WRITE_ONCE(tp->snd_nxt, TCP_SKB_CB(skb)->end_seq);
 
        __skb_unlink(skb, &sk->sk_write_queue);
        tcp_rbtree_insert(&sk->tcp_rtx_queue, skb);
@@ -1196,10 +1196,10 @@ static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
        struct tcp_sock *tp = tcp_sk(sk);
 
        /* Advance write_seq and place onto the write_queue. */
-       tp->write_seq = TCP_SKB_CB(skb)->end_seq;
+       WRITE_ONCE(tp->write_seq, TCP_SKB_CB(skb)->end_seq);
        __skb_header_release(skb);
        tcp_add_write_queue_tail(sk, skb);
-       sk->sk_wmem_queued += skb->truesize;
+       sk_wmem_queued_add(sk, skb->truesize);
        sk_mem_charge(sk, skb->truesize);
 }
 
@@ -1333,7 +1333,7 @@ int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
                return -ENOMEM; /* We'll just try again later. */
        skb_copy_decrypted(buff, skb);
 
-       sk->sk_wmem_queued += buff->truesize;
+       sk_wmem_queued_add(sk, buff->truesize);
        sk_mem_charge(sk, buff->truesize);
        nlen = skb->len - len - nsize;
        buff->truesize += nlen;
@@ -1443,7 +1443,7 @@ int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
 
        if (delta_truesize) {
                skb->truesize      -= delta_truesize;
-               sk->sk_wmem_queued -= delta_truesize;
+               sk_wmem_queued_add(sk, -delta_truesize);
                sk_mem_uncharge(sk, delta_truesize);
                sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
        }
@@ -1888,7 +1888,7 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
                return -ENOMEM;
        skb_copy_decrypted(buff, skb);
 
-       sk->sk_wmem_queued += buff->truesize;
+       sk_wmem_queued_add(sk, buff->truesize);
        sk_mem_charge(sk, buff->truesize);
        buff->truesize += nlen;
        skb->truesize -= nlen;
@@ -2152,7 +2152,7 @@ static int tcp_mtu_probe(struct sock *sk)
        nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC, false);
        if (!nskb)
                return -1;
-       sk->sk_wmem_queued += nskb->truesize;
+       sk_wmem_queued_add(sk, nskb->truesize);
        sk_mem_charge(sk, nskb->truesize);
 
        skb = tcp_send_head(sk);
@@ -2482,7 +2482,7 @@ bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto)
        /* Don't do any loss probe on a Fast Open connection before 3WHS
         * finishes.
         */
-       if (tp->fastopen_rsk)
+       if (rcu_access_pointer(tp->fastopen_rsk))
                return false;
 
        early_retrans = sock_net(sk)->ipv4.sysctl_tcp_early_retrans;
@@ -3142,7 +3142,7 @@ void tcp_send_fin(struct sock *sk)
                         * if FIN had been sent. This is because retransmit path
                         * does not change tp->snd_nxt.
                         */
-                       tp->snd_nxt++;
+                       WRITE_ONCE(tp->snd_nxt, tp->snd_nxt + 1);
                        return;
                }
        } else {
@@ -3222,7 +3222,7 @@ int tcp_send_synack(struct sock *sk)
                        tcp_rtx_queue_unlink_and_free(skb, sk);
                        __skb_header_release(nskb);
                        tcp_rbtree_insert(&sk->tcp_rtx_queue, nskb);
-                       sk->sk_wmem_queued += nskb->truesize;
+                       sk_wmem_queued_add(sk, nskb->truesize);
                        sk_mem_charge(sk, nskb->truesize);
                        skb = nskb;
                }
@@ -3426,14 +3426,14 @@ static void tcp_connect_init(struct sock *sk)
        tp->snd_una = tp->write_seq;
        tp->snd_sml = tp->write_seq;
        tp->snd_up = tp->write_seq;
-       tp->snd_nxt = tp->write_seq;
+       WRITE_ONCE(tp->snd_nxt, tp->write_seq);
 
        if (likely(!tp->repair))
                tp->rcv_nxt = 0;
        else
                tp->rcv_tstamp = tcp_jiffies32;
        tp->rcv_wup = tp->rcv_nxt;
-       tp->copied_seq = tp->rcv_nxt;
+       WRITE_ONCE(tp->copied_seq, tp->rcv_nxt);
 
        inet_csk(sk)->icsk_rto = tcp_timeout_init(sk);
        inet_csk(sk)->icsk_retransmits = 0;
@@ -3447,9 +3447,9 @@ static void tcp_connect_queue_skb(struct sock *sk, struct sk_buff *skb)
 
        tcb->end_seq += skb->len;
        __skb_header_release(skb);
-       sk->sk_wmem_queued += skb->truesize;
+       sk_wmem_queued_add(sk, skb->truesize);
        sk_mem_charge(sk, skb->truesize);
-       tp->write_seq = tcb->end_seq;
+       WRITE_ONCE(tp->write_seq, tcb->end_seq);
        tp->packets_out += tcp_skb_pcount(skb);
 }
 
@@ -3586,11 +3586,11 @@ int tcp_connect(struct sock *sk)
        /* We change tp->snd_nxt after the tcp_transmit_skb() call
         * in order to make this packet get counted in tcpOutSegs.
         */
-       tp->snd_nxt = tp->write_seq;
+       WRITE_ONCE(tp->snd_nxt, tp->write_seq);
        tp->pushed_seq = tp->write_seq;
        buff = tcp_send_head(sk);
        if (unlikely(buff)) {
-               tp->snd_nxt     = TCP_SKB_CB(buff)->seq;
+               WRITE_ONCE(tp->snd_nxt, TCP_SKB_CB(buff)->seq);
                tp->pushed_seq  = TCP_SKB_CB(buff)->seq;
        }
        TCP_INC_STATS(sock_net(sk), TCP_MIB_ACTIVEOPENS);
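
tcp_output.c replaces every open-coded `sk->sk_wmem_queued += size` with sk_wmem_queued_add(), funnelling all updates of the queued-memory counter through one helper so the store can be annotated for lockless readers. A rough userspace analogue of hiding such an accounting field behind a single add helper (the names below are illustrative, not the kernel API):

#include <assert.h>
#include <stdatomic.h>

struct fake_sock {
	_Atomic int wmem_queued;	/* bytes of queued write memory */
};

/* Single point through which the counter changes; callers pass a signed
 * delta, so charging (+truesize) and uncharging (-truesize) both go
 * through the same annotated store.
 */
static void wmem_queued_add(struct fake_sock *sk, int delta)
{
	int old = atomic_load_explicit(&sk->wmem_queued, memory_order_relaxed);

	atomic_store_explicit(&sk->wmem_queued, old + delta, memory_order_relaxed);
}

int main(void)
{
	struct fake_sock sk = { .wmem_queued = 0 };

	wmem_queued_add(&sk, 2048);	/* queue a buffer */
	wmem_queued_add(&sk, -2048);	/* free it again  */
	assert(atomic_load(&sk.wmem_queued) == 0);
	return 0;
}
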
index 40de2d2364a1eca14c259d77ebed361d17829eb9..dd5a6317a8018a45ad609f832ced6df2937ad453 100644 (file)
@@ -198,8 +198,13 @@ static bool retransmits_timed_out(struct sock *sk,
                return false;
 
        start_ts = tcp_sk(sk)->retrans_stamp;
-       if (likely(timeout == 0))
-               timeout = tcp_model_timeout(sk, boundary, TCP_RTO_MIN);
+       if (likely(timeout == 0)) {
+               unsigned int rto_base = TCP_RTO_MIN;
+
+               if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
+                       rto_base = tcp_timeout_init(sk);
+               timeout = tcp_model_timeout(sk, boundary, rto_base);
+       }
 
        return (s32)(tcp_time_stamp(tcp_sk(sk)) - start_ts - timeout) >= 0;
 }
@@ -381,15 +386,13 @@ abort:            tcp_write_err(sk);
  *     Timer for Fast Open socket to retransmit SYNACK. Note that the
  *     sk here is the child socket, not the parent (listener) socket.
  */
-static void tcp_fastopen_synack_timer(struct sock *sk)
+static void tcp_fastopen_synack_timer(struct sock *sk, struct request_sock *req)
 {
        struct inet_connection_sock *icsk = inet_csk(sk);
        int max_retries = icsk->icsk_syn_retries ? :
            sock_net(sk)->ipv4.sysctl_tcp_synack_retries + 1; /* add one more retry for fastopen */
        struct tcp_sock *tp = tcp_sk(sk);
-       struct request_sock *req;
 
-       req = tcp_sk(sk)->fastopen_rsk;
        req->rsk_ops->syn_ack_timeout(req);
 
        if (req->num_timeout >= max_retries) {
@@ -430,11 +433,14 @@ void tcp_retransmit_timer(struct sock *sk)
        struct tcp_sock *tp = tcp_sk(sk);
        struct net *net = sock_net(sk);
        struct inet_connection_sock *icsk = inet_csk(sk);
+       struct request_sock *req;
 
-       if (tp->fastopen_rsk) {
+       req = rcu_dereference_protected(tp->fastopen_rsk,
+                                       lockdep_sock_is_held(sk));
+       if (req) {
                WARN_ON_ONCE(sk->sk_state != TCP_SYN_RECV &&
                             sk->sk_state != TCP_FIN_WAIT1);
-               tcp_fastopen_synack_timer(sk);
+               tcp_fastopen_synack_timer(sk, req);
                /* Before we receive ACK to our SYN-ACK don't retransmit
                 * anything else (e.g., data or FIN segments).
                 */
index cf755156a684373f92c639c274f0fb4ab62aa211..1d58ce829dcae476a7a32a6ab5fa8bb91ec8ae67 100644 (file)
@@ -388,7 +388,7 @@ static int compute_score(struct sock *sk, struct net *net,
                return -1;
        score += 4;
 
-       if (sk->sk_incoming_cpu == raw_smp_processor_id())
+       if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
                score++;
        return score;
 }
@@ -821,6 +821,7 @@ static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4,
        int is_udplite = IS_UDPLITE(sk);
        int offset = skb_transport_offset(skb);
        int len = skb->len - offset;
+       int datalen = len - sizeof(*uh);
        __wsum csum = 0;
 
        /*
@@ -854,10 +855,12 @@ static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4,
                        return -EIO;
                }
 
-               skb_shinfo(skb)->gso_size = cork->gso_size;
-               skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
-               skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(len - sizeof(uh),
-                                                        cork->gso_size);
+               if (datalen > cork->gso_size) {
+                       skb_shinfo(skb)->gso_size = cork->gso_size;
+                       skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
+                       skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(datalen,
+                                                                cork->gso_size);
+               }
                goto csum_partial;
        }
 
@@ -1313,6 +1316,20 @@ static void udp_set_dev_scratch(struct sk_buff *skb)
                scratch->_tsize_state |= UDP_SKB_IS_STATELESS;
 }
 
+static void udp_skb_csum_unnecessary_set(struct sk_buff *skb)
+{
+       /* We come here after udp_lib_checksum_complete() returned 0.
+        * This means that __skb_checksum_complete() might have
+        * set skb->csum_valid to 1.
+        * On 64bit platforms, we can set csum_unnecessary
+        * to true, but only if the skb is not shared.
+        */
+#if BITS_PER_LONG == 64
+       if (!skb_shared(skb))
+               udp_skb_scratch(skb)->csum_unnecessary = true;
+#endif
+}
+
 static int udp_skb_truesize(struct sk_buff *skb)
 {
        return udp_skb_scratch(skb)->_tsize_state & ~UDP_SKB_IS_STATELESS;
@@ -1547,10 +1564,7 @@ static struct sk_buff *__first_packet_length(struct sock *sk,
                        *total += skb->truesize;
                        kfree_skb(skb);
                } else {
-                       /* the csum related bits could be changed, refresh
-                        * the scratch area
-                        */
-                       udp_set_dev_scratch(skb);
+                       udp_skb_csum_unnecessary_set(skb);
                        break;
                }
        }
@@ -1574,7 +1588,7 @@ static int first_packet_length(struct sock *sk)
 
        spin_lock_bh(&rcvq->lock);
        skb = __first_packet_length(sk, rcvq, &total);
-       if (!skb && !skb_queue_empty(sk_queue)) {
+       if (!skb && !skb_queue_empty_lockless(sk_queue)) {
                spin_lock(&sk_queue->lock);
                skb_queue_splice_tail_init(sk_queue, rcvq);
                spin_unlock(&sk_queue->lock);
@@ -1647,7 +1661,7 @@ struct sk_buff *__skb_recv_udp(struct sock *sk, unsigned int flags,
                                return skb;
                        }
 
-                       if (skb_queue_empty(sk_queue)) {
+                       if (skb_queue_empty_lockless(sk_queue)) {
                                spin_unlock_bh(&queue->lock);
                                goto busy_check;
                        }
@@ -1673,7 +1687,7 @@ busy_check:
                                break;
 
                        sk_busy_loop(sk, flags & MSG_DONTWAIT);
-               } while (!skb_queue_empty(sk_queue));
+               } while (!skb_queue_empty_lockless(sk_queue));
 
                /* sk_queue is empty, reader_queue may contain peeked packets */
        } while (timeo &&
@@ -1969,7 +1983,7 @@ static int udp_queue_rcv_one_skb(struct sock *sk, struct sk_buff *skb)
         */
        if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
                goto drop;
-       nf_reset(skb);
+       nf_reset_ct(skb);
 
        if (static_branch_unlikely(&udp_encap_needed_key) && up->encap_type) {
                int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);
@@ -2298,7 +2312,7 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
 
        if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
                goto drop;
-       nf_reset(skb);
+       nf_reset_ct(skb);
 
        /* No socket. Drop packet silently, if checksum is wrong */
        if (udp_lib_checksum_complete(skb))
@@ -2709,7 +2723,7 @@ __poll_t udp_poll(struct file *file, struct socket *sock, poll_table *wait)
        __poll_t mask = datagram_poll(file, sock, wait);
        struct sock *sk = sock->sk;
 
-       if (!skb_queue_empty(&udp_sk(sk)->reader_queue))
+       if (!skb_queue_empty_lockless(&udp_sk(sk)->reader_queue))
                mask |= EPOLLIN | EPOLLRDNORM;
 
        /* Check for false positives due to checksum errors */
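
Both UDP send paths in this series (udp_send_skb() here and udp_v6_send_skb() later) now fill in the GSO fields only when the payload is larger than cork->gso_size, and they derive gso_segs from the UDP payload length (datalen) rather than from the length that still includes the 8-byte header. A small standalone sketch of that segment-count rule (plain C, not the kernel code):

#include <assert.h>
#include <stdio.h>

#define UDP_HDR_LEN 8

/* Returns how many GSO segments a UDP payload of 'datalen' bytes needs
 * for a given gso_size, or 1 when no segmentation should be requested.
 */
static unsigned int udp_gso_segs(unsigned int datalen, unsigned int gso_size)
{
	if (datalen <= gso_size)	/* fits in one segment: leave GSO off */
		return 1;

	return (datalen + gso_size - 1) / gso_size;	/* DIV_ROUND_UP */
}

int main(void)
{
	unsigned int len = 3000 + UDP_HDR_LEN;		/* skb->len - offset */
	unsigned int datalen = len - UDP_HDR_LEN;	/* payload only      */

	assert(udp_gso_segs(datalen, 1400) == 3);	/* 3000 / 1400 -> 3  */
	assert(udp_gso_segs(1000, 1400) == 1);		/* small send, no GSO */
	printf("segments: %u\n", udp_gso_segs(datalen, 1400));
	return 0;
}
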
index 6a576ff92c399fbb686c839c525e700f2579b1a9..34ccef18b40e60e283a63c18102fef8b927cbfaf 100644 (file)
@@ -5964,13 +5964,20 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
        switch (event) {
        case RTM_NEWADDR:
                /*
-                * If the address was optimistic
-                * we inserted the route at the start of
-                * our DAD process, so we don't need
-                * to do it again
+                * If the address was optimistic we inserted the route at the
+                * start of our DAD process, so we don't need to do it again.
+                * If the device was taken down in the middle of the DAD
+                * cycle there is a race where we could get here without a
+                * host route, so nothing to insert. That will be fixed when
+                * the device is brought up.
                 */
-               if (!rcu_access_pointer(ifp->rt->fib6_node))
+               if (ifp->rt && !rcu_access_pointer(ifp->rt->fib6_node)) {
                        ip6_ins_rt(net, ifp->rt);
+               } else if (!ifp->rt && (ifp->idev->dev->flags & IFF_UP)) {
+                       pr_warn("BUG: Address %pI6c on device %s is missing its host route.\n",
+                               &ifp->addr, ifp->idev->dev->name);
+               }
+
                if (ifp->idev->cnf.forwarding)
                        addrconf_join_anycast(ifp);
                if (!ipv6_addr_any(&ifp->peer_addr))
index 783f3c1466daca7ea3d348fa1cc766fcc9026d62..2fc079284ca43886652ce394d701b153d18cf32f 100644 (file)
@@ -7,6 +7,7 @@
 #include <linux/export.h>
 #include <net/ipv6.h>
 #include <net/ipv6_stubs.h>
+#include <net/addrconf.h>
 #include <net/ip.h>
 
 /* if ipv6 module registers this function is used by xfrm to force all
index cf60fae9533b230455d89007a9ac45af958f2efe..fbe9d4295eac38d247aca362d6006cdec2d7f7da 100644 (file)
@@ -105,7 +105,7 @@ static inline int compute_score(struct sock *sk, struct net *net,
                        return -1;
 
                score = 1;
-               if (sk->sk_incoming_cpu == raw_smp_processor_id())
+               if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
                        score++;
        }
        return score;
index d5779d6a60650ba9ce6a72d6246a829e2df733e0..923034c52ce40d85c91a54d1b107e2088d31bb85 100644 (file)
@@ -980,9 +980,9 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
                dsfield = key->tos;
                if (!(tun_info->key.tun_flags & TUNNEL_ERSPAN_OPT))
                        goto tx_err;
-               md = ip_tunnel_info_opts(tun_info);
-               if (!md)
+               if (tun_info->options_len < sizeof(*md))
                        goto tx_err;
+               md = ip_tunnel_info_opts(tun_info);
 
                tun_id = tunnel_id_to_key32(key->tun_id);
                if (md->version == 1) {
@@ -2192,6 +2192,7 @@ static void ip6erspan_tap_setup(struct net_device *dev)
 {
        ether_setup(dev);
 
+       dev->max_mtu = 0;
        dev->netdev_ops = &ip6erspan_netdev_ops;
        dev->needs_free_netdev = true;
        dev->priv_destructor = ip6gre_dev_free;
index d432d0011c160f41aec09640e95179dd7b364cfc..3d71c7d6102c45cd89a68e3a143c0b7e872294ce 100644 (file)
@@ -223,6 +223,16 @@ static struct sk_buff *ip6_rcv_core(struct sk_buff *skb, struct net_device *dev,
        if (ipv6_addr_is_multicast(&hdr->saddr))
                goto err;
 
+       /* While RFC4291 is not explicit about v4mapped addresses
+        * in IPv6 headers, it seems clear the Linux dual-stack
+        * model cannot deal properly with these.
+        * Security models could be fooled by ::ffff:127.0.0.1 for example.
+        *
+        * https://tools.ietf.org/html/draft-itojun-v6ops-v4mapped-harmful-02
+        */
+       if (ipv6_addr_v4mapped(&hdr->saddr))
+               goto err;
+
        skb->transport_header = skb->network_header + sizeof(*hdr);
        IP6CB(skb)->nhoff = offsetof(struct ipv6hdr, nexthdr);
 
@@ -371,7 +381,7 @@ resubmit_final:
                        /* Free reference early: we don't need it any more,
                           and it may hold ip_conntrack module loaded
                           indefinitely. */
-                       nf_reset(skb);
+                       nf_reset_ct(skb);
 
                        skb_postpull_rcsum(skb, skb_network_header(skb),
                                           skb_network_header_len(skb));
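
The ip6_input.c change above rejects IPv6 packets whose source address is IPv4-mapped (::ffff:a.b.c.d), since such sources can fool dual-stack address checks, ::ffff:127.0.0.1 masquerading as loopback being the example given in the comment. Userspace code can classify an address the same way with the standard IN6_IS_ADDR_V4MAPPED() macro; a minimal sketch:

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>

/* Returns 1 if the textual IPv6 address is a v4-mapped address that a
 * receive path following this patch would refuse as a source address.
 */
static int is_v4mapped_source(const char *text)
{
	struct in6_addr a;

	if (inet_pton(AF_INET6, text, &a) != 1)
		return 0;

	return IN6_IS_ADDR_V4MAPPED(&a) ? 1 : 0;
}

int main(void)
{
	printf("%d\n", is_v4mapped_source("::ffff:127.0.0.1"));	/* 1 */
	printf("%d\n", is_v4mapped_source("2001:db8::1"));	/* 0 */
	return 0;
}
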
index edadee4a7e76105f737d705052db8f5bbc6c0152..71827b56c0063b56bcfbef4bd8910ddcec035824 100644 (file)
@@ -768,6 +768,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
                                inet6_sk(skb->sk) : NULL;
        struct ip6_frag_state state;
        unsigned int mtu, hlen, nexthdr_offset;
+       ktime_t tstamp = skb->tstamp;
        int hroom, err = 0;
        __be32 frag_id;
        u8 *prevhdr, nexthdr = 0;
@@ -855,6 +856,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
                        if (iter.frag)
                                ip6_fraglist_prepare(skb, &iter);
 
+                       skb->tstamp = tstamp;
                        err = output(net, sk, skb);
                        if (!err)
                                IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
@@ -913,6 +915,7 @@ slow_path:
                /*
                 *      Put this fragment into the sending queue.
                 */
+               frag->tstamp = tstamp;
                err = output(net, sk, frag);
                if (err)
                        goto fail;
index a9bff556d3b2ddd5334d20a63210bad0dfa6cd2d..409e79b84a830dd22ffe7728f45d9dc65ec01df4 100644 (file)
@@ -119,6 +119,7 @@ int br_ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
                                  struct sk_buff *))
 {
        int frag_max_size = BR_INPUT_SKB_CB(skb)->frag_max_size;
+       ktime_t tstamp = skb->tstamp;
        struct ip6_frag_state state;
        u8 *prevhdr, nexthdr = 0;
        unsigned int mtu, hlen;
@@ -183,6 +184,7 @@ int br_ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
                        if (iter.frag)
                                ip6_fraglist_prepare(skb, &iter);
 
+                       skb->tstamp = tstamp;
                        err = output(net, sk, data, skb);
                        if (err || !iter.frag)
                                break;
@@ -215,6 +217,7 @@ slow_path:
                        goto blackhole;
                }
 
+               skb2->tstamp = tstamp;
                err = output(net, sk, data, skb2);
                if (err)
                        goto blackhole;
index e6c9da9866b1bb527363dbeaba2e378a139f2ae1..a0a2de30be3e7b6fa9aa34dcc6a918e566713e07 100644 (file)
@@ -54,7 +54,7 @@ void nf_dup_ipv6(struct net *net, struct sk_buff *skb, unsigned int hooknum,
                return;
 
 #if IS_ENABLED(CONFIG_NF_CONNTRACK)
-       nf_reset(skb);
+       nf_reset_ct(skb);
        nf_ct_set(skb, NULL, IP_CT_UNTRACKED);
 #endif
        if (hooknum == NF_INET_PRE_ROUTING ||
index 6e1888ee403628fcefe1c9afca4d5de81b249ab2..a77f6b7d3a7c08ff712d873b5231aca8a9eeefde 100644 (file)
@@ -215,7 +215,7 @@ static bool ipv6_raw_deliver(struct sk_buff *skb, int nexthdr)
 
                        /* Not releasing hash table! */
                        if (clone) {
-                               nf_reset(clone);
+                               nf_reset_ct(clone);
                                rawv6_rcv(sk, clone);
                        }
                }
index e3d9f4559c99f252eba448845cce434bc53f3fd8..4804b6dc5e6519a457e631bc1438a14f85477567 100644 (file)
@@ -215,7 +215,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
            !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
                tp->rx_opt.ts_recent = 0;
                tp->rx_opt.ts_recent_stamp = 0;
-               tp->write_seq = 0;
+               WRITE_ONCE(tp->write_seq, 0);
        }
 
        sk->sk_v6_daddr = usin->sin6_addr;
@@ -311,10 +311,11 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
 
        if (likely(!tp->repair)) {
                if (!tp->write_seq)
-                       tp->write_seq = secure_tcpv6_seq(np->saddr.s6_addr32,
-                                                        sk->sk_v6_daddr.s6_addr32,
-                                                        inet->inet_sport,
-                                                        inet->inet_dport);
+                       WRITE_ONCE(tp->write_seq,
+                                  secure_tcpv6_seq(np->saddr.s6_addr32,
+                                                   sk->sk_v6_daddr.s6_addr32,
+                                                   inet->inet_sport,
+                                                   inet->inet_dport));
                tp->tsoffset = secure_tcpv6_ts_off(sock_net(sk),
                                                   np->saddr.s6_addr32,
                                                   sk->sk_v6_daddr.s6_addr32);
@@ -406,7 +407,7 @@ static int tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 
        tp = tcp_sk(sk);
        /* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
-       fastopen = tp->fastopen_rsk;
+       fastopen = rcu_dereference(tp->fastopen_rsk);
        snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
        if (sk->sk_state != TCP_LISTEN &&
            !between(seq, snd_una, tp->snd_nxt)) {
@@ -1895,7 +1896,8 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
                /* Because we don't lock the socket,
                 * we might find a transient negative value.
                 */
-               rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
+               rx_queue = max_t(int, READ_ONCE(tp->rcv_nxt) -
+                                     READ_ONCE(tp->copied_seq), 0);
 
        seq_printf(seq,
                   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
@@ -1906,7 +1908,7 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
                   dest->s6_addr32[0], dest->s6_addr32[1],
                   dest->s6_addr32[2], dest->s6_addr32[3], destp,
                   state,
-                  tp->write_seq - tp->snd_una,
+                  READ_ONCE(tp->write_seq) - tp->snd_una,
                   rx_queue,
                   timer_active,
                   jiffies_delta_to_clock_t(timer_expires - jiffies),
index aae4938f3deab284ecfd78f0c601647fbb9b03d4..9fec580c968e0d331fcaf54289364526ebd902c2 100644 (file)
@@ -135,7 +135,7 @@ static int compute_score(struct sock *sk, struct net *net,
                return -1;
        score++;
 
-       if (sk->sk_incoming_cpu == raw_smp_processor_id())
+       if (READ_ONCE(sk->sk_incoming_cpu) == raw_smp_processor_id())
                score++;
 
        return score;
@@ -1109,6 +1109,7 @@ static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6,
        __wsum csum = 0;
        int offset = skb_transport_offset(skb);
        int len = skb->len - offset;
+       int datalen = len - sizeof(*uh);
 
        /*
         * Create a UDP header
@@ -1141,8 +1142,12 @@ static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6,
                        return -EIO;
                }
 
-               skb_shinfo(skb)->gso_size = cork->gso_size;
-               skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
+               if (datalen > cork->gso_size) {
+                       skb_shinfo(skb)->gso_size = cork->gso_size;
+                       skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
+                       skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(datalen,
+                                                                cork->gso_size);
+               }
                goto csum_partial;
        }
 
index 105e5a7092e75ace14aa29bebce626ad9cebc435..f82ea12bac378fbdd5a64cba62665300b691897c 100644 (file)
@@ -1078,7 +1078,7 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len
        memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
        IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
                              IPSKB_REROUTED);
-       nf_reset(skb);
+       nf_reset_ct(skb);
 
        bh_lock_sock(sk);
        if (sock_owned_by_user(sk)) {
index bd3f39349d4084021d2bdc1bd385f877b4a845e3..d3b520b9b2c9d226f90b579b7b40e8baf459f73a 100644 (file)
@@ -56,7 +56,6 @@ static int l2tp_eth_dev_init(struct net_device *dev)
 {
        eth_hw_addr_random(dev);
        eth_broadcast_addr(dev->broadcast);
-       netdev_lockdep_set_classes(dev);
 
        return 0;
 }
@@ -151,7 +150,7 @@ static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb,
        skb->ip_summed = CHECKSUM_NONE;
 
        skb_dst_drop(skb);
-       nf_reset(skb);
+       nf_reset_ct(skb);
 
        rcu_read_lock();
        dev = rcu_dereference(spriv->dev);
index 622833317dcbc0e29c8bdb6d5be03a666ff740d5..0d7c887a2b75db65afba7955a2bf9572a6a37786 100644 (file)
@@ -193,7 +193,7 @@ pass_up:
        if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
                goto discard_put;
 
-       nf_reset(skb);
+       nf_reset_ct(skb);
 
        return sk_receive_skb(sk, skb, 1);
 
index 687e23a8b326675503f01ee228f52eb02ef881d9..802f19aba7e32e63846036f526d228ea8015053c 100644 (file)
@@ -206,7 +206,7 @@ pass_up:
        if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
                goto discard_put;
 
-       nf_reset(skb);
+       nf_reset_ct(skb);
 
        return sk_receive_skb(sk, skb, 1);
 
index 2017b7d780f5af73c1ac7461113842776d1b00fc..c74f44dfaa22a5020880ea218c6607ace8fd5e22 100644 (file)
@@ -113,22 +113,26 @@ static inline u8 llc_ui_header_len(struct sock *sk, struct sockaddr_llc *addr)
  *
  *     Send data via reliable llc2 connection.
  *     Returns 0 upon success, non-zero if action did not succeed.
+ *
+ *     This function always consumes a reference to the skb.
  */
 static int llc_ui_send_data(struct sock* sk, struct sk_buff *skb, int noblock)
 {
        struct llc_sock* llc = llc_sk(sk);
-       int rc = 0;
 
        if (unlikely(llc_data_accept_state(llc->state) ||
                     llc->remote_busy_flag ||
                     llc->p_flag)) {
                long timeout = sock_sndtimeo(sk, noblock);
+               int rc;
 
                rc = llc_ui_wait_for_busy_core(sk, timeout);
+               if (rc) {
+                       kfree_skb(skb);
+                       return rc;
+               }
        }
-       if (unlikely(!rc))
-               rc = llc_build_and_send_pkt(sk, skb);
-       return rc;
+       return llc_build_and_send_pkt(sk, skb);
 }
 
 static void llc_ui_sk_init(struct socket *sock, struct sock *sk)
@@ -899,7 +903,7 @@ static int llc_ui_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
        DECLARE_SOCKADDR(struct sockaddr_llc *, addr, msg->msg_name);
        int flags = msg->msg_flags;
        int noblock = flags & MSG_DONTWAIT;
-       struct sk_buff *skb;
+       struct sk_buff *skb = NULL;
        size_t size = 0;
        int rc = -EINVAL, copied = 0, hdrlen;
 
@@ -908,10 +912,10 @@ static int llc_ui_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
        lock_sock(sk);
        if (addr) {
                if (msg->msg_namelen < sizeof(*addr))
-                       goto release;
+                       goto out;
        } else {
                if (llc_ui_addr_null(&llc->addr))
-                       goto release;
+                       goto out;
                addr = &llc->addr;
        }
        /* must bind connection to sap if user hasn't done it. */
@@ -919,7 +923,7 @@ static int llc_ui_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
                /* bind to sap with null dev, exclusive. */
                rc = llc_ui_autobind(sock, addr);
                if (rc)
-                       goto release;
+                       goto out;
        }
        hdrlen = llc->dev->hard_header_len + llc_ui_header_len(sk, addr);
        size = hdrlen + len;
@@ -928,12 +932,12 @@ static int llc_ui_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
        copied = size - hdrlen;
        rc = -EINVAL;
        if (copied < 0)
-               goto release;
+               goto out;
        release_sock(sk);
        skb = sock_alloc_send_skb(sk, size, noblock, &rc);
        lock_sock(sk);
        if (!skb)
-               goto release;
+               goto out;
        skb->dev      = llc->dev;
        skb->protocol = llc_proto_type(addr->sllc_arphrd);
        skb_reserve(skb, hdrlen);
@@ -943,29 +947,31 @@ static int llc_ui_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
        if (sk->sk_type == SOCK_DGRAM || addr->sllc_ua) {
                llc_build_and_send_ui_pkt(llc->sap, skb, addr->sllc_mac,
                                          addr->sllc_sap);
+               skb = NULL;
                goto out;
        }
        if (addr->sllc_test) {
                llc_build_and_send_test_pkt(llc->sap, skb, addr->sllc_mac,
                                            addr->sllc_sap);
+               skb = NULL;
                goto out;
        }
        if (addr->sllc_xid) {
                llc_build_and_send_xid_pkt(llc->sap, skb, addr->sllc_mac,
                                           addr->sllc_sap);
+               skb = NULL;
                goto out;
        }
        rc = -ENOPROTOOPT;
        if (!(sk->sk_type == SOCK_STREAM && !addr->sllc_ua))
                goto out;
        rc = llc_ui_send_data(sk, skb, noblock);
+       skb = NULL;
 out:
-       if (rc) {
-               kfree_skb(skb);
-release:
+       kfree_skb(skb);
+       if (rc)
                dprintk("%s: failed sending from %02X to %02X: %d\n",
                        __func__, llc->laddr.lsap, llc->daddr.lsap, rc);
-       }
        release_sock(sk);
        return rc ? : copied;
 }
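
Much of the LLC rework above enforces a single ownership rule: llc_ui_send_data(), and further down llc_build_and_send_pkt() and llc_conn_state_process(), always consume the skb reference they are handed, so llc_ui_sendmsg() clears its local pointer after each hand-off and frees only what it still owns. A toy refcounted-buffer version of that convention (the types and helpers below are invented for illustration):

#include <stdio.h>
#include <stdlib.h>

struct buf {
	int refcnt;
	size_t len;
};

static struct buf *buf_alloc(size_t len)
{
	struct buf *b = calloc(1, sizeof(*b));

	if (b) {
		b->refcnt = 1;
		b->len = len;
	}
	return b;
}

static void buf_put(struct buf *b)	/* like kfree_skb(): NULL-safe */
{
	if (b && --b->refcnt == 0)
		free(b);
}

/* "Always consumes a reference": whether it succeeds or fails, the
 * buffer no longer belongs to the caller once this returns.
 */
static int send_data(struct buf *b, int link_up)
{
	if (!link_up) {
		buf_put(b);	/* error path still releases it */
		return -1;
	}
	printf("sent %zu bytes\n", b->len);
	buf_put(b);		/* transmission path releases it too */
	return 0;
}

int main(void)
{
	struct buf *b = buf_alloc(1500);
	int rc;

	if (!b)
		return 1;
	rc = send_data(b, 1);
	b = NULL;	/* caller forgets the pointer, as llc_ui_sendmsg() now does */
	buf_put(b);	/* safe: frees only what we still own (nothing) */
	return rc;
}
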
index 4d78375f9872d1a6962aa0e7cda7b913a6e0a7f3..647c0554d04cd89d19563f6ef5b609b9311fd911 100644 (file)
@@ -372,6 +372,7 @@ int llc_conn_ac_send_i_cmd_p_set_1(struct sock *sk, struct sk_buff *skb)
        llc_pdu_init_as_i_cmd(skb, 1, llc->vS, llc->vR);
        rc = llc_mac_hdr_init(skb, llc->dev->dev_addr, llc->daddr.mac);
        if (likely(!rc)) {
+               skb_get(skb);
                llc_conn_send_pdu(sk, skb);
                llc_conn_ac_inc_vs_by_1(sk, skb);
        }
@@ -389,7 +390,8 @@ static int llc_conn_ac_send_i_cmd_p_set_0(struct sock *sk, struct sk_buff *skb)
        llc_pdu_init_as_i_cmd(skb, 0, llc->vS, llc->vR);
        rc = llc_mac_hdr_init(skb, llc->dev->dev_addr, llc->daddr.mac);
        if (likely(!rc)) {
-               rc = llc_conn_send_pdu(sk, skb);
+               skb_get(skb);
+               llc_conn_send_pdu(sk, skb);
                llc_conn_ac_inc_vs_by_1(sk, skb);
        }
        return rc;
@@ -406,6 +408,7 @@ int llc_conn_ac_send_i_xxx_x_set_0(struct sock *sk, struct sk_buff *skb)
        llc_pdu_init_as_i_cmd(skb, 0, llc->vS, llc->vR);
        rc = llc_mac_hdr_init(skb, llc->dev->dev_addr, llc->daddr.mac);
        if (likely(!rc)) {
+               skb_get(skb);
                llc_conn_send_pdu(sk, skb);
                llc_conn_ac_inc_vs_by_1(sk, skb);
        }
@@ -916,7 +919,8 @@ static int llc_conn_ac_send_i_rsp_f_set_ackpf(struct sock *sk,
        llc_pdu_init_as_i_cmd(skb, llc->ack_pf, llc->vS, llc->vR);
        rc = llc_mac_hdr_init(skb, llc->dev->dev_addr, llc->daddr.mac);
        if (likely(!rc)) {
-               rc = llc_conn_send_pdu(sk, skb);
+               skb_get(skb);
+               llc_conn_send_pdu(sk, skb);
                llc_conn_ac_inc_vs_by_1(sk, skb);
        }
        return rc;
index 4ff89cb7c86f785a175b048c0c7c6a82c2f8fe8c..7b620acaca9ec194e03b590c21fb004401dbc7ce 100644 (file)
@@ -30,7 +30,7 @@
 #endif
 
 static int llc_find_offset(int state, int ev_type);
-static int llc_conn_send_pdus(struct sock *sk, struct sk_buff *skb);
+static void llc_conn_send_pdus(struct sock *sk);
 static int llc_conn_service(struct sock *sk, struct sk_buff *skb);
 static int llc_exec_conn_trans_actions(struct sock *sk,
                                       struct llc_conn_state_trans *trans,
@@ -55,6 +55,8 @@ int sysctl_llc2_busy_timeout = LLC2_BUSY_TIME * HZ;
 *     (executing its actions and changing state), upper layer will be
  *     indicated or confirmed, if needed. Returns 0 for success, 1 for
  *     failure. The socket lock has to be held before calling this function.
+ *
+ *     This function always consumes a reference to the skb.
  */
 int llc_conn_state_process(struct sock *sk, struct sk_buff *skb)
 {
@@ -62,12 +64,6 @@ int llc_conn_state_process(struct sock *sk, struct sk_buff *skb)
        struct llc_sock *llc = llc_sk(skb->sk);
        struct llc_conn_state_ev *ev = llc_conn_ev(skb);
 
-       /*
-        * We have to hold the skb, because llc_conn_service will kfree it in
-        * the sending path and we need to look at the skb->cb, where we encode
-        * llc_conn_state_ev.
-        */
-       skb_get(skb);
        ev->ind_prim = ev->cfm_prim = 0;
        /*
         * Send event to state machine
@@ -75,21 +71,12 @@ int llc_conn_state_process(struct sock *sk, struct sk_buff *skb)
        rc = llc_conn_service(skb->sk, skb);
        if (unlikely(rc != 0)) {
                printk(KERN_ERR "%s: llc_conn_service failed\n", __func__);
-               goto out_kfree_skb;
-       }
-
-       if (unlikely(!ev->ind_prim && !ev->cfm_prim)) {
-               /* indicate or confirm not required */
-               if (!skb->next)
-                       goto out_kfree_skb;
                goto out_skb_put;
        }
 
-       if (unlikely(ev->ind_prim && ev->cfm_prim)) /* Paranoia */
-               skb_get(skb);
-
        switch (ev->ind_prim) {
        case LLC_DATA_PRIM:
+               skb_get(skb);
                llc_save_primitive(sk, skb, LLC_DATA_PRIM);
                if (unlikely(sock_queue_rcv_skb(sk, skb))) {
                        /*
@@ -106,6 +93,7 @@ int llc_conn_state_process(struct sock *sk, struct sk_buff *skb)
                 * skb->sk pointing to the newly created struct sock in
                 * llc_conn_handler. -acme
                 */
+               skb_get(skb);
                skb_queue_tail(&sk->sk_receive_queue, skb);
                sk->sk_state_change(sk);
                break;
@@ -121,7 +109,6 @@ int llc_conn_state_process(struct sock *sk, struct sk_buff *skb)
                                sk->sk_state_change(sk);
                        }
                }
-               kfree_skb(skb);
                sock_put(sk);
                break;
        case LLC_RESET_PRIM:
@@ -130,14 +117,11 @@ int llc_conn_state_process(struct sock *sk, struct sk_buff *skb)
                 * RESET is not being notified to upper layers for now
                 */
                printk(KERN_INFO "%s: received a reset ind!\n", __func__);
-               kfree_skb(skb);
                break;
        default:
-               if (ev->ind_prim) {
+               if (ev->ind_prim)
                        printk(KERN_INFO "%s: received unknown %d prim!\n",
                                __func__, ev->ind_prim);
-                       kfree_skb(skb);
-               }
                /* No indication */
                break;
        }
@@ -179,25 +163,22 @@ int llc_conn_state_process(struct sock *sk, struct sk_buff *skb)
                printk(KERN_INFO "%s: received a reset conf!\n", __func__);
                break;
        default:
-               if (ev->cfm_prim) {
+               if (ev->cfm_prim)
                        printk(KERN_INFO "%s: received unknown %d prim!\n",
                                        __func__, ev->cfm_prim);
-                       break;
-               }
-               goto out_skb_put; /* No confirmation */
+               /* No confirmation */
+               break;
        }
-out_kfree_skb:
-       kfree_skb(skb);
 out_skb_put:
        kfree_skb(skb);
        return rc;
 }
 
-int llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb)
+void llc_conn_send_pdu(struct sock *sk, struct sk_buff *skb)
 {
        /* queue PDU to send to MAC layer */
        skb_queue_tail(&sk->sk_write_queue, skb);
-       return llc_conn_send_pdus(sk, skb);
+       llc_conn_send_pdus(sk);
 }
 
 /**
@@ -255,7 +236,7 @@ void llc_conn_resend_i_pdu_as_cmd(struct sock *sk, u8 nr, u8 first_p_bit)
        if (howmany_resend > 0)
                llc->vS = (llc->vS + 1) % LLC_2_SEQ_NBR_MODULO;
        /* any PDUs to re-send are queued up; start sending to MAC */
-       llc_conn_send_pdus(sk, NULL);
+       llc_conn_send_pdus(sk);
 out:;
 }
 
@@ -296,7 +277,7 @@ void llc_conn_resend_i_pdu_as_rsp(struct sock *sk, u8 nr, u8 first_f_bit)
        if (howmany_resend > 0)
                llc->vS = (llc->vS + 1) % LLC_2_SEQ_NBR_MODULO;
        /* any PDUs to re-send are queued up; start sending to MAC */
-       llc_conn_send_pdus(sk, NULL);
+       llc_conn_send_pdus(sk);
 out:;
 }
 
@@ -340,16 +321,12 @@ out:
 /**
  *     llc_conn_send_pdus - Sends queued PDUs
  *     @sk: active connection
- *     @hold_skb: the skb held by caller, or NULL if does not care
  *
- *     Sends queued pdus to MAC layer for transmission. When @hold_skb is
- *     NULL, always return 0. Otherwise, return 0 if @hold_skb is sent
- *     successfully, or 1 for failure.
+ *     Sends queued pdus to MAC layer for transmission.
  */
-static int llc_conn_send_pdus(struct sock *sk, struct sk_buff *hold_skb)
+static void llc_conn_send_pdus(struct sock *sk)
 {
        struct sk_buff *skb;
-       int ret = 0;
 
        while ((skb = skb_dequeue(&sk->sk_write_queue)) != NULL) {
                struct llc_pdu_sn *pdu = llc_pdu_sn_hdr(skb);
@@ -361,20 +338,10 @@ static int llc_conn_send_pdus(struct sock *sk, struct sk_buff *hold_skb)
                        skb_queue_tail(&llc_sk(sk)->pdu_unack_q, skb);
                        if (!skb2)
                                break;
-                       dev_queue_xmit(skb2);
-               } else {
-                       bool is_target = skb == hold_skb;
-                       int rc;
-
-                       if (is_target)
-                               skb_get(skb);
-                       rc = dev_queue_xmit(skb);
-                       if (is_target)
-                               ret = rc;
+                       skb = skb2;
                }
+               dev_queue_xmit(skb);
        }
-
-       return ret;
 }
 
 /**
@@ -846,7 +813,7 @@ void llc_conn_handler(struct llc_sap *sap, struct sk_buff *skb)
        else {
                dprintk("%s: adding to backlog...\n", __func__);
                llc_set_backlog_type(skb, LLC_PACKET);
-               if (sk_add_backlog(sk, skb, sk->sk_rcvbuf))
+               if (sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf)))
                        goto drop_unlock;
        }
 out:
index 8db03c2d5440b12536b3b7016bc5c01b56666d8a..ad6547736c219dafe42c2014257c0c87f372934f 100644 (file)
@@ -38,6 +38,8 @@
  *     closed and -EBUSY when sending data is not permitted in this state or
 *     LLC has sent an I pdu with p bit set to 1 and is waiting for its
  *     response.
+ *
+ *     This function always consumes a reference to the skb.
  */
 int llc_build_and_send_pkt(struct sock *sk, struct sk_buff *skb)
 {
@@ -46,20 +48,22 @@ int llc_build_and_send_pkt(struct sock *sk, struct sk_buff *skb)
        struct llc_sock *llc = llc_sk(sk);
 
        if (unlikely(llc->state == LLC_CONN_STATE_ADM))
-               goto out;
+               goto out_free;
        rc = -EBUSY;
        if (unlikely(llc_data_accept_state(llc->state) || /* data_conn_refuse */
                     llc->p_flag)) {
                llc->failed_data_req = 1;
-               goto out;
+               goto out_free;
        }
        ev = llc_conn_ev(skb);
        ev->type      = LLC_CONN_EV_TYPE_PRIM;
        ev->prim      = LLC_DATA_PRIM;
        ev->prim_type = LLC_PRIM_TYPE_REQ;
        skb->dev      = llc->dev;
-       rc = llc_conn_state_process(sk, skb);
-out:
+       return llc_conn_state_process(sk, skb);
+
+out_free:
+       kfree_skb(skb);
        return rc;
 }
 
index a94bd56bcac6f74110a0f231814fb45f4a600f30..7ae4cc684d3abe351a72110f3041ebe003063d26 100644 (file)
@@ -58,8 +58,10 @@ int llc_sap_action_send_ui(struct llc_sap *sap, struct sk_buff *skb)
                            ev->daddr.lsap, LLC_PDU_CMD);
        llc_pdu_init_as_ui_cmd(skb);
        rc = llc_mac_hdr_init(skb, ev->saddr.mac, ev->daddr.mac);
-       if (likely(!rc))
+       if (likely(!rc)) {
+               skb_get(skb);
                rc = dev_queue_xmit(skb);
+       }
        return rc;
 }
 
@@ -81,8 +83,10 @@ int llc_sap_action_send_xid_c(struct llc_sap *sap, struct sk_buff *skb)
                            ev->daddr.lsap, LLC_PDU_CMD);
        llc_pdu_init_as_xid_cmd(skb, LLC_XID_NULL_CLASS_2, 0);
        rc = llc_mac_hdr_init(skb, ev->saddr.mac, ev->daddr.mac);
-       if (likely(!rc))
+       if (likely(!rc)) {
+               skb_get(skb);
                rc = dev_queue_xmit(skb);
+       }
        return rc;
 }
 
@@ -135,8 +139,10 @@ int llc_sap_action_send_test_c(struct llc_sap *sap, struct sk_buff *skb)
                            ev->daddr.lsap, LLC_PDU_CMD);
        llc_pdu_init_as_test_cmd(skb);
        rc = llc_mac_hdr_init(skb, ev->saddr.mac, ev->daddr.mac);
-       if (likely(!rc))
+       if (likely(!rc)) {
+               skb_get(skb);
                rc = dev_queue_xmit(skb);
+       }
        return rc;
 }
 
index a7f7b8ff47292b476b4e3ec2e17e05a6fa0ee3ca..be419062e19a605f198585e56ef20a7da8774187 100644 (file)
@@ -197,29 +197,22 @@ out:
  *     After executing actions of the event, upper layer will be indicated
 *     if needed (on receiving a UI frame). sk can be null for the
  *     datalink_proto case.
+ *
+ *     This function always consumes a reference to the skb.
  */
 static void llc_sap_state_process(struct llc_sap *sap, struct sk_buff *skb)
 {
        struct llc_sap_state_ev *ev = llc_sap_ev(skb);
 
-       /*
-        * We have to hold the skb, because llc_sap_next_state
-        * will kfree it in the sending path and we need to
-        * look at the skb->cb, where we encode llc_sap_state_ev.
-        */
-       skb_get(skb);
        ev->ind_cfm_flag = 0;
        llc_sap_next_state(sap, skb);
-       if (ev->ind_cfm_flag == LLC_IND) {
-               if (skb->sk->sk_state == TCP_LISTEN)
-                       kfree_skb(skb);
-               else {
-                       llc_save_primitive(skb->sk, skb, ev->prim);
 
-                       /* queue skb to the user. */
-                       if (sock_queue_rcv_skb(skb->sk, skb))
-                               kfree_skb(skb);
-               }
+       if (ev->ind_cfm_flag == LLC_IND && skb->sk->sk_state != TCP_LISTEN) {
+               llc_save_primitive(skb->sk, skb, ev->prim);
+
+               /* queue skb to the user. */
+               if (sock_queue_rcv_skb(skb->sk, skb) == 0)
+                       return;
        }
        kfree_skb(skb);
 }
index b1438fd4d876082b1887d5410ee66ca8fe2b2cc2..64b544ae9966bc8819380fdc24d5ed8d53ea25be 100644 (file)
@@ -487,9 +487,14 @@ static ssize_t ieee80211_if_fmt_aqm(
        const struct ieee80211_sub_if_data *sdata, char *buf, int buflen)
 {
        struct ieee80211_local *local = sdata->local;
-       struct txq_info *txqi = to_txq_info(sdata->vif.txq);
+       struct txq_info *txqi;
        int len;
 
+       if (!sdata->vif.txq)
+               return 0;
+
+       txqi = to_txq_info(sdata->vif.txq);
+
        spin_lock_bh(&local->fq.lock);
        rcu_read_lock();
 
@@ -658,7 +663,9 @@ static void add_common_files(struct ieee80211_sub_if_data *sdata)
        DEBUGFS_ADD(rc_rateidx_vht_mcs_mask_5ghz);
        DEBUGFS_ADD(hw_queues);
 
-       if (sdata->local->ops->wake_tx_queue)
+       if (sdata->local->ops->wake_tx_queue &&
+           sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE &&
+           sdata->vif.type != NL80211_IFTYPE_NAN)
                DEBUGFS_ADD(aqm);
 }
 
index 26a2f49208b6a30623667f21dfb0b18fd7aba2d2..54dd8849d1cc7dd25117f3fab41f286fb4b70f14 100644 (file)
@@ -2633,7 +2633,8 @@ struct sk_buff *ieee80211_ap_probereq_get(struct ieee80211_hw *hw,
 
        rcu_read_lock();
        ssid = ieee80211_bss_get_ie(cbss, WLAN_EID_SSID);
-       if (WARN_ON_ONCE(ssid == NULL))
+       if (WARN_ONCE(!ssid || ssid[1] > IEEE80211_MAX_SSID_LEN,
+                     "invalid SSID element (len=%d)", ssid ? ssid[1] : -1))
                ssid_len = 0;
        else
                ssid_len = ssid[1];
@@ -5233,7 +5234,7 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
 
        rcu_read_lock();
        ssidie = ieee80211_bss_get_ie(req->bss, WLAN_EID_SSID);
-       if (!ssidie) {
+       if (!ssidie || ssidie[1] > sizeof(assoc_data->ssid)) {
                rcu_read_unlock();
                kfree(assoc_data);
                return -EINVAL;
index 768d14c9a7161c56c78d11253bb6e152c999ecdb..0e05ff0376726ddbaf80f05d9f90f6f086225b88 100644 (file)
@@ -3467,9 +3467,18 @@ ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx)
        case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP):
                /* process for all: mesh, mlme, ibss */
                break;
+       case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
+               if (is_multicast_ether_addr(mgmt->da) &&
+                   !is_broadcast_ether_addr(mgmt->da))
+                       return RX_DROP_MONITOR;
+
+               /* process only for station/IBSS */
+               if (sdata->vif.type != NL80211_IFTYPE_STATION &&
+                   sdata->vif.type != NL80211_IFTYPE_ADHOC)
+                       return RX_DROP_MONITOR;
+               break;
        case cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP):
        case cpu_to_le16(IEEE80211_STYPE_REASSOC_RESP):
-       case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
        case cpu_to_le16(IEEE80211_STYPE_DISASSOC):
                if (is_multicast_ether_addr(mgmt->da) &&
                    !is_broadcast_ether_addr(mgmt->da))
index adf94ba1ed77a5c44ae8479541d03c63466c62ab..4d31d9688dc230275c49b34c876b36ad2b635a1b 100644 (file)
@@ -520,10 +520,33 @@ static int ieee80211_start_sw_scan(struct ieee80211_local *local,
        return 0;
 }
 
+static bool __ieee80211_can_leave_ch(struct ieee80211_sub_if_data *sdata)
+{
+       struct ieee80211_local *local = sdata->local;
+       struct ieee80211_sub_if_data *sdata_iter;
+
+       if (!ieee80211_is_radar_required(local))
+               return true;
+
+       if (!regulatory_pre_cac_allowed(local->hw.wiphy))
+               return false;
+
+       mutex_lock(&local->iflist_mtx);
+       list_for_each_entry(sdata_iter, &local->interfaces, list) {
+               if (sdata_iter->wdev.cac_started) {
+                       mutex_unlock(&local->iflist_mtx);
+                       return false;
+               }
+       }
+       mutex_unlock(&local->iflist_mtx);
+
+       return true;
+}
+
 static bool ieee80211_can_scan(struct ieee80211_local *local,
                               struct ieee80211_sub_if_data *sdata)
 {
-       if (ieee80211_is_radar_required(local))
+       if (!__ieee80211_can_leave_ch(sdata))
                return false;
 
        if (!list_empty(&local->roc_list))
@@ -630,7 +653,10 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
 
        lockdep_assert_held(&local->mtx);
 
-       if (local->scan_req || ieee80211_is_radar_required(local))
+       if (local->scan_req)
+               return -EBUSY;
+
+       if (!__ieee80211_can_leave_ch(sdata))
                return -EBUSY;
 
        if (!ieee80211_can_scan(local, sdata)) {
index 051a02ddcb854d5dae0eb33e742129ffcf203848..32a7a53833c01d1ea760646440f54009ae9c4544 100644 (file)
@@ -247,7 +247,8 @@ static void __ieee80211_wake_txqs(struct ieee80211_sub_if_data *sdata, int ac)
        struct sta_info *sta;
        int i;
 
-       spin_lock_bh(&fq->lock);
+       local_bh_disable();
+       spin_lock(&fq->lock);
 
        if (sdata->vif.type == NL80211_IFTYPE_AP)
                ps = &sdata->bss->ps;
@@ -273,9 +274,9 @@ static void __ieee80211_wake_txqs(struct ieee80211_sub_if_data *sdata, int ac)
                                                &txqi->flags))
                                continue;
 
-                       spin_unlock_bh(&fq->lock);
+                       spin_unlock(&fq->lock);
                        drv_wake_tx_queue(local, txqi);
-                       spin_lock_bh(&fq->lock);
+                       spin_lock(&fq->lock);
                }
        }
 
@@ -288,12 +289,14 @@ static void __ieee80211_wake_txqs(struct ieee80211_sub_if_data *sdata, int ac)
            (ps && atomic_read(&ps->num_sta_ps)) || ac != vif->txq->ac)
                goto out;
 
-       spin_unlock_bh(&fq->lock);
+       spin_unlock(&fq->lock);
 
        drv_wake_tx_queue(local, txqi);
+       local_bh_enable();
        return;
 out:
-       spin_unlock_bh(&fq->lock);
+       spin_unlock(&fq->lock);
+       local_bh_enable();
 }
 
 static void
index 4515056ef1c2b4d7ab268986152bc6845b006fbd..f9b16f2b221918f1be0ae6bd8da118b2046670a2 100644 (file)
@@ -193,21 +193,29 @@ struct ip_vs_app *register_ip_vs_app(struct netns_ipvs *ipvs, struct ip_vs_app *
 
        mutex_lock(&__ip_vs_app_mutex);
 
+       /* increase the module use count */
+       if (!ip_vs_use_count_inc()) {
+               err = -ENOENT;
+               goto out_unlock;
+       }
+
        list_for_each_entry(a, &ipvs->app_list, a_list) {
                if (!strcmp(app->name, a->name)) {
                        err = -EEXIST;
+                       /* decrease the module use count */
+                       ip_vs_use_count_dec();
                        goto out_unlock;
                }
        }
        a = kmemdup(app, sizeof(*app), GFP_KERNEL);
        if (!a) {
                err = -ENOMEM;
+               /* decrease the module use count */
+               ip_vs_use_count_dec();
                goto out_unlock;
        }
        INIT_LIST_HEAD(&a->incs_list);
        list_add(&a->a_list, &ipvs->app_list);
-       /* increase the module use count */
-       ip_vs_use_count_inc();
 
 out_unlock:
        mutex_unlock(&__ip_vs_app_mutex);
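
register_ip_vs_app() now takes the module use count before it walks and extends ipvs->app_list, and drops it again on both failure paths (duplicate name, allocation failure), instead of incrementing only after a successful insert; ip_vs_add_service() checks the increment's return value and do_ip_vs_set_ctl() below loses its unconditional increment for the same reason. The shape of "pin first, unwind on every error path", sketched with a plain counter and list in userspace C (names are illustrative):

#include <errno.h>
#include <stdlib.h>
#include <string.h>

static int module_refs;		/* stand-in for the module use count */

struct app {
	char name[16];
	struct app *next;
};

static struct app *app_list;

static int register_app(const char *name)
{
	struct app *a;

	module_refs++;			/* pin the module before publishing anything */

	for (a = app_list; a; a = a->next) {
		if (!strcmp(a->name, name)) {
			module_refs--;	/* duplicate: undo the pin */
			return -EEXIST;
		}
	}

	a = calloc(1, sizeof(*a));
	if (!a) {
		module_refs--;		/* allocation failed: undo the pin */
		return -ENOMEM;
	}

	strncpy(a->name, name, sizeof(a->name) - 1);
	a->next = app_list;
	app_list = a;			/* success: the reference stays held */
	return 0;
}

int main(void)
{
	int rc = register_app("ftp");		/* succeeds, keeps the ref   */

	if (register_app("ftp") != -EEXIST)	/* duplicate, ref is dropped */
		return 1;
	if (module_refs != 1)			/* exactly one ref remains   */
		return 1;
	return rc;
}
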
index 8b48e7ce1c2c2243bbdc8c4e43e4e484f3e52df6..3cccc88ef817bdbc4719316757e8cf8b5d61df1b 100644 (file)
@@ -93,7 +93,6 @@ static bool __ip_vs_addr_is_local_v6(struct net *net,
 static void update_defense_level(struct netns_ipvs *ipvs)
 {
        struct sysinfo i;
-       static int old_secure_tcp = 0;
        int availmem;
        int nomem;
        int to_change = -1;
@@ -174,35 +173,35 @@ static void update_defense_level(struct netns_ipvs *ipvs)
        spin_lock(&ipvs->securetcp_lock);
        switch (ipvs->sysctl_secure_tcp) {
        case 0:
-               if (old_secure_tcp >= 2)
+               if (ipvs->old_secure_tcp >= 2)
                        to_change = 0;
                break;
        case 1:
                if (nomem) {
-                       if (old_secure_tcp < 2)
+                       if (ipvs->old_secure_tcp < 2)
                                to_change = 1;
                        ipvs->sysctl_secure_tcp = 2;
                } else {
-                       if (old_secure_tcp >= 2)
+                       if (ipvs->old_secure_tcp >= 2)
                                to_change = 0;
                }
                break;
        case 2:
                if (nomem) {
-                       if (old_secure_tcp < 2)
+                       if (ipvs->old_secure_tcp < 2)
                                to_change = 1;
                } else {
-                       if (old_secure_tcp >= 2)
+                       if (ipvs->old_secure_tcp >= 2)
                                to_change = 0;
                        ipvs->sysctl_secure_tcp = 1;
                }
                break;
        case 3:
-               if (old_secure_tcp < 2)
+               if (ipvs->old_secure_tcp < 2)
                        to_change = 1;
                break;
        }
-       old_secure_tcp = ipvs->sysctl_secure_tcp;
+       ipvs->old_secure_tcp = ipvs->sysctl_secure_tcp;
        if (to_change >= 0)
                ip_vs_protocol_timeout_change(ipvs,
                                              ipvs->sysctl_secure_tcp > 1);
@@ -1275,7 +1274,8 @@ ip_vs_add_service(struct netns_ipvs *ipvs, struct ip_vs_service_user_kern *u,
        struct ip_vs_service *svc = NULL;
 
        /* increase the module use count */
-       ip_vs_use_count_inc();
+       if (!ip_vs_use_count_inc())
+               return -ENOPROTOOPT;
 
        /* Lookup the scheduler by 'u->sched_name' */
        if (strcmp(u->sched_name, "none")) {
@@ -2435,9 +2435,6 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
        if (copy_from_user(arg, user, len) != 0)
                return -EFAULT;
 
-       /* increase the module use count */
-       ip_vs_use_count_inc();
-
        /* Handle daemons since they have another lock */
        if (cmd == IP_VS_SO_SET_STARTDAEMON ||
            cmd == IP_VS_SO_SET_STOPDAEMON) {
@@ -2450,13 +2447,13 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
                        ret = -EINVAL;
                        if (strscpy(cfg.mcast_ifn, dm->mcast_ifn,
                                    sizeof(cfg.mcast_ifn)) <= 0)
-                               goto out_dec;
+                               return ret;
                        cfg.syncid = dm->syncid;
                        ret = start_sync_thread(ipvs, &cfg, dm->state);
                } else {
                        ret = stop_sync_thread(ipvs, dm->state);
                }
-               goto out_dec;
+               return ret;
        }
 
        mutex_lock(&__ip_vs_mutex);
@@ -2551,10 +2548,6 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
 
   out_unlock:
        mutex_unlock(&__ip_vs_mutex);
-  out_dec:
-       /* decrease the module use count */
-       ip_vs_use_count_dec();
-
        return ret;
 }
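In the defense-level hunk above, the previously function-static old_secure_tcp becomes a per-netns field, so each namespace tracks its own previous secure_tcp mode. The general shape of keeping per-context history in the context object, with a hypothetical struct ctx standing in for struct netns_ipvs, is:

/* Sketch: per-context state belongs in the context object, not in a
 * function-static variable shared by every caller. Hypothetical types. */
#include <stdio.h>

struct ctx {
        int secure_tcp;         /* current sysctl value */
        int old_secure_tcp;     /* previous value, kept per context */
};

static void update_defense_level(struct ctx *c)
{
        /* A "static int old" here would be shared across all contexts and
         * corrupt the state machine as soon as two of them interleave. */
        if (c->old_secure_tcp >= 2 && c->secure_tcp < 2)
                printf("ctx %p: leaving secure mode\n", (void *)c);

        c->old_secure_tcp = c->secure_tcp;
}

int main(void)
{
        struct ctx a = { .secure_tcp = 0, .old_secure_tcp = 2 };
        struct ctx b = { .secure_tcp = 3, .old_secure_tcp = 0 };

        update_defense_level(&a);       /* each netns keeps its own history */
        update_defense_level(&b);
        return 0;
}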
 
index 8e104dff7abc414c3879a5a7555814badf44b8e4..166c669f07634b2652e8ed71938415de20b9a14f 100644 (file)
@@ -68,7 +68,8 @@ int register_ip_vs_pe(struct ip_vs_pe *pe)
        struct ip_vs_pe *tmp;
 
        /* increase the module use count */
-       ip_vs_use_count_inc();
+       if (!ip_vs_use_count_inc())
+               return -ENOENT;
 
        mutex_lock(&ip_vs_pe_mutex);
        /* Make sure that the pe with this name doesn't exist
index 2f9d5cd5daeeeb842fc174cb47612a1e81d202fe..d4903723be7e90ad6c996f00ee66486acd6302fb 100644 (file)
@@ -179,7 +179,8 @@ int register_ip_vs_scheduler(struct ip_vs_scheduler *scheduler)
        }
 
        /* increase the module use count */
-       ip_vs_use_count_inc();
+       if (!ip_vs_use_count_inc())
+               return -ENOENT;
 
        mutex_lock(&ip_vs_sched_mutex);
 
index a4a78c4b06dec380dc4db957f9c95cf050998580..8dc892a9dc91a9bbd0b11bf53c3da506de548fd2 100644 (file)
@@ -1762,6 +1762,10 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
        IP_VS_DBG(7, "Each ip_vs_sync_conn entry needs %zd bytes\n",
                  sizeof(struct ip_vs_sync_conn_v0));
 
+       /* increase the module use count */
+       if (!ip_vs_use_count_inc())
+               return -ENOPROTOOPT;
+
        /* Do not hold one mutex and then to block on another */
        for (;;) {
                rtnl_lock();
@@ -1892,9 +1896,6 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
        mutex_unlock(&ipvs->sync_mutex);
        rtnl_unlock();
 
-       /* increase the module use count */
-       ip_vs_use_count_inc();
-
        return 0;
 
 out:
@@ -1924,11 +1925,17 @@ out:
                }
                kfree(ti);
        }
+
+       /* decrease the module use count */
+       ip_vs_use_count_dec();
        return result;
 
 out_early:
        mutex_unlock(&ipvs->sync_mutex);
        rtnl_unlock();
+
+       /* decrease the module use count */
+       ip_vs_use_count_dec();
        return result;
 }
 
index 9c464d24beecdf8b90eb4a058528938e73ca7001..888d3068a492e92cce07b27cde357277a271d7c8 100644 (file)
@@ -613,7 +613,7 @@ static inline int ip_vs_tunnel_xmit_prepare(struct sk_buff *skb,
        if (unlikely(cp->flags & IP_VS_CONN_F_NFCT))
                ret = ip_vs_confirm_conntrack(skb);
        if (ret == NF_ACCEPT) {
-               nf_reset(skb);
+               nf_reset_ct(skb);
                skb_forward_csum(skb);
        }
        return ret;
index 0c63120b2db2e1ea9983a6b1ce8d2aefebc29501..5cd610b547e0d1e3463a65ba3627f265c836bdc5 100644 (file)
@@ -1792,8 +1792,8 @@ void __nf_ct_refresh_acct(struct nf_conn *ct,
        if (nf_ct_is_confirmed(ct))
                extra_jiffies += nfct_time_stamp;
 
-       if (ct->timeout != extra_jiffies)
-               ct->timeout = extra_jiffies;
+       if (READ_ONCE(ct->timeout) != extra_jiffies)
+               WRITE_ONCE(ct->timeout, extra_jiffies);
 acct:
        if (do_acct)
                nf_ct_acct_update(ct, ctinfo, skb->len);
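The timeout refresh above is annotated with READ_ONCE()/WRITE_ONCE() because ct->timeout is read locklessly elsewhere. A self-contained sketch of the idiom, with deliberately simplified userspace macros (the kernel's versions in <linux/compiler.h> are stronger), is:

/* Sketch of the READ_ONCE()/WRITE_ONCE() idiom for a field that other
 * contexts read without a lock. Simplified macros for userspace. */
#include <stdio.h>

#define READ_ONCE(x)      (*(volatile __typeof__(x) *)&(x))
#define WRITE_ONCE(x, v)  (*(volatile __typeof__(x) *)&(x) = (v))

struct conn {
        unsigned long timeout;  /* read locklessly by the GC path */
};

static void refresh_timeout(struct conn *ct, unsigned long extra_jiffies)
{
        /* Skip the store when the value is already current, and make both
         * the load and the store single, untorn accesses. */
        if (READ_ONCE(ct->timeout) != extra_jiffies)
                WRITE_ONCE(ct->timeout, extra_jiffies);
}

int main(void)
{
        struct conn ct = { .timeout = 0 };

        refresh_timeout(&ct, 1000);
        printf("timeout=%lu\n", ct.timeout);
        return 0;
}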
index 132f5228b4319df1f584d69f0ff75f40c45dad28..128245efe84abec57723f2c26262c90d7b3e27e8 100644 (file)
@@ -202,6 +202,8 @@ int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow)
 {
        int err;
 
+       flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT;
+
        err = rhashtable_insert_fast(&flow_table->rhashtable,
                                     &flow->tuplehash[0].node,
                                     nf_flow_offload_rhash_params);
@@ -218,7 +220,6 @@ int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow)
                return err;
        }
 
-       flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT;
        return 0;
 }
 EXPORT_SYMBOL_GPL(flow_offload_add);
index e546f759b7a714b5376c035e1a82cc1821cb680e..ad783f4840efea23e04cd1d2bd438da274124c9c 100644 (file)
@@ -347,7 +347,7 @@ int nft_flow_rule_offload_commit(struct net *net)
 
                        policy = nft_trans_chain_policy(trans);
                        err = nft_flow_offload_chain(trans->ctx.chain, &policy,
-                                                    FLOW_BLOCK_BIND);
+                                                    FLOW_BLOCK_UNBIND);
                        break;
                case NFT_MSG_NEWRULE:
                        if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
index af1497ab9464236b3875aeb536ae32e244e99141..69d6173f91e2b9a5070ac6e98977cc5dd4dc0dfd 100644 (file)
@@ -218,8 +218,13 @@ static void nft_connlimit_destroy_clone(const struct nft_ctx *ctx,
 static bool nft_connlimit_gc(struct net *net, const struct nft_expr *expr)
 {
        struct nft_connlimit *priv = nft_expr_priv(expr);
+       bool ret;
 
-       return nf_conncount_gc_list(net, &priv->list);
+       local_bh_disable();
+       ret = nf_conncount_gc_list(net, &priv->list);
+       local_bh_enable();
+
+       return ret;
 }
 
 static struct nft_expr_type nft_connlimit_type;
index 22a80eb60222ef06b671b0b1cbea87609a71786e..5cb2d8908d2a5d47bdedcaaf3383636708b366fc 100644 (file)
@@ -161,13 +161,21 @@ static int nft_payload_offload_ll(struct nft_offload_ctx *ctx,
 
        switch (priv->offset) {
        case offsetof(struct ethhdr, h_source):
+               if (priv->len != ETH_ALEN)
+                       return -EOPNOTSUPP;
+
                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_ETH_ADDRS, eth_addrs,
                                  src, ETH_ALEN, reg);
                break;
        case offsetof(struct ethhdr, h_dest):
+               if (priv->len != ETH_ALEN)
+                       return -EOPNOTSUPP;
+
                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_ETH_ADDRS, eth_addrs,
                                  dst, ETH_ALEN, reg);
                break;
+       default:
+               return -EOPNOTSUPP;
        }
 
        return 0;
@@ -181,14 +189,23 @@ static int nft_payload_offload_ip(struct nft_offload_ctx *ctx,
 
        switch (priv->offset) {
        case offsetof(struct iphdr, saddr):
+               if (priv->len != sizeof(struct in_addr))
+                       return -EOPNOTSUPP;
+
                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4, src,
                                  sizeof(struct in_addr), reg);
                break;
        case offsetof(struct iphdr, daddr):
+               if (priv->len != sizeof(struct in_addr))
+                       return -EOPNOTSUPP;
+
                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV4_ADDRS, ipv4, dst,
                                  sizeof(struct in_addr), reg);
                break;
        case offsetof(struct iphdr, protocol):
+               if (priv->len != sizeof(__u8))
+                       return -EOPNOTSUPP;
+
                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic, ip_proto,
                                  sizeof(__u8), reg);
                nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_TRANSPORT);
@@ -208,14 +225,23 @@ static int nft_payload_offload_ip6(struct nft_offload_ctx *ctx,
 
        switch (priv->offset) {
        case offsetof(struct ipv6hdr, saddr):
+               if (priv->len != sizeof(struct in6_addr))
+                       return -EOPNOTSUPP;
+
                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6, src,
                                  sizeof(struct in6_addr), reg);
                break;
        case offsetof(struct ipv6hdr, daddr):
+               if (priv->len != sizeof(struct in6_addr))
+                       return -EOPNOTSUPP;
+
                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_IPV6_ADDRS, ipv6, dst,
                                  sizeof(struct in6_addr), reg);
                break;
        case offsetof(struct ipv6hdr, nexthdr):
+               if (priv->len != sizeof(__u8))
+                       return -EOPNOTSUPP;
+
                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_BASIC, basic, ip_proto,
                                  sizeof(__u8), reg);
                nft_offload_set_dependency(ctx, NFT_OFFLOAD_DEP_TRANSPORT);
@@ -255,10 +281,16 @@ static int nft_payload_offload_tcp(struct nft_offload_ctx *ctx,
 
        switch (priv->offset) {
        case offsetof(struct tcphdr, source):
+               if (priv->len != sizeof(__be16))
+                       return -EOPNOTSUPP;
+
                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, src,
                                  sizeof(__be16), reg);
                break;
        case offsetof(struct tcphdr, dest):
+               if (priv->len != sizeof(__be16))
+                       return -EOPNOTSUPP;
+
                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, dst,
                                  sizeof(__be16), reg);
                break;
@@ -277,10 +309,16 @@ static int nft_payload_offload_udp(struct nft_offload_ctx *ctx,
 
        switch (priv->offset) {
        case offsetof(struct udphdr, source):
+               if (priv->len != sizeof(__be16))
+                       return -EOPNOTSUPP;
+
                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, src,
                                  sizeof(__be16), reg);
                break;
        case offsetof(struct udphdr, dest):
+               if (priv->len != sizeof(__be16))
+                       return -EOPNOTSUPP;
+
                NFT_OFFLOAD_MATCH(FLOW_DISSECTOR_KEY_PORTS, tp, dst,
                                  sizeof(__be16), reg);
                break;
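Each case above now refuses to offload a payload match whose length is not exactly the size of the header field, since the flow-dissector keys can only express whole-field matches. A minimal sketch of that validation, with a hypothetical tcphdr_sketch layout rather than the real struct tcphdr, is:

/* Sketch: reject partial-field matches before building an offload rule.
 * Hypothetical header layout; the kernel uses NFT_OFFLOAD_MATCH() per case. */
#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct tcphdr_sketch {
        uint16_t source;
        uint16_t dest;
};

/* Return 0 when (offset, len) names exactly one whole field the hardware
 * path can match on, -EOPNOTSUPP otherwise. */
static int check_tcp_match(size_t offset, size_t len)
{
        switch (offset) {
        case offsetof(struct tcphdr_sketch, source):
        case offsetof(struct tcphdr_sketch, dest):
                if (len != sizeof(uint16_t))
                        return -EOPNOTSUPP;     /* partial-field match */
                return 0;
        default:
                return -EOPNOTSUPP;             /* unknown field */
        }
}

int main(void)
{
        printf("%d\n", check_tcp_match(offsetof(struct tcphdr_sketch, dest), 2));
        printf("%d\n", check_tcp_match(offsetof(struct tcphdr_sketch, dest), 1));
        return 0;
}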
index c4f54ad2b98afb44002415ce1daf2afef646d8c0..58d5373c513c7999bd73813eafd80c8abad87dc4 100644 (file)
@@ -63,28 +63,6 @@ static DEFINE_SPINLOCK(nr_list_lock);
 
 static const struct proto_ops nr_proto_ops;
 
-/*
- * NETROM network devices are virtual network devices encapsulating NETROM
- * frames into AX.25 which will be sent through an AX.25 device, so form a
- * special "super class" of normal net devices; split their locks off into a
- * separate class since they always nest.
- */
-static struct lock_class_key nr_netdev_xmit_lock_key;
-static struct lock_class_key nr_netdev_addr_lock_key;
-
-static void nr_set_lockdep_one(struct net_device *dev,
-                              struct netdev_queue *txq,
-                              void *_unused)
-{
-       lockdep_set_class(&txq->_xmit_lock, &nr_netdev_xmit_lock_key);
-}
-
-static void nr_set_lockdep_key(struct net_device *dev)
-{
-       lockdep_set_class(&dev->addr_list_lock, &nr_netdev_addr_lock_key);
-       netdev_for_each_tx_queue(dev, nr_set_lockdep_one, NULL);
-}
-
 /*
  *     Socket removal during an interrupt is now safe.
  */
@@ -1414,7 +1392,6 @@ static int __init nr_proto_init(void)
                        free_netdev(dev);
                        goto fail;
                }
-               nr_set_lockdep_key(dev);
                dev_nr[i] = dev;
        }
 
index 8dfea26536c9d42a6bd9f7b64e3001737dbc4d89..28604414dec1b7d03f0d175cc87c10e0922c7043 100644 (file)
@@ -107,9 +107,14 @@ static int llcp_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
        llcp_sock->service_name = kmemdup(llcp_addr.service_name,
                                          llcp_sock->service_name_len,
                                          GFP_KERNEL);
-
+       if (!llcp_sock->service_name) {
+               ret = -ENOMEM;
+               goto put_dev;
+       }
        llcp_sock->ssap = nfc_llcp_get_sdp_ssap(local, llcp_sock);
        if (llcp_sock->ssap == LLCP_SAP_MAX) {
+               kfree(llcp_sock->service_name);
+               llcp_sock->service_name = NULL;
                ret = -EADDRINUSE;
                goto put_dev;
        }
@@ -549,11 +554,11 @@ static __poll_t llcp_sock_poll(struct file *file, struct socket *sock,
        if (sk->sk_state == LLCP_LISTEN)
                return llcp_accept_poll(sk);
 
-       if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
+       if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue))
                mask |= EPOLLERR |
                        (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);
 
-       if (!skb_queue_empty(&sk->sk_receive_queue))
+       if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
                mask |= EPOLLIN | EPOLLRDNORM;
 
        if (sk->sk_state == LLCP_CLOSED)
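The bind path above now checks the kmemdup() result and releases the service name again when no SSAP is available, so every error exit leaves the socket without a dangling allocation. A self-contained sketch of that unwind pattern, with hypothetical helpers, is:

/* Sketch of error-path unwinding: every failure after an allocation frees
 * what was allocated before returning. Hypothetical names. */
#include <errno.h>
#include <stdlib.h>
#include <string.h>

struct sock_sketch {
        char *service_name;
        int ssap;
};

static int get_ssap(void)
{
        return -1;                      /* pretend the SAP space is exhausted */
}

static int bind_service(struct sock_sketch *sk, const char *name)
{
        sk->service_name = strdup(name);
        if (!sk->service_name)
                return -ENOMEM;

        sk->ssap = get_ssap();
        if (sk->ssap < 0) {
                free(sk->service_name);         /* undo the allocation */
                sk->service_name = NULL;
                return -EADDRINUSE;
        }
        return 0;
}

int main(void)
{
        struct sock_sketch sk = { 0 };

        return bind_service(&sk, "urn:nfc:sn:snep") == -EADDRINUSE ? 0 : 1;
}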
index 3572e11b6f21ed09ab476d39b1c86bcfb15c5fa2..1c77f520f474d91e02c0c3460dbe6c707f9127c4 100644 (file)
@@ -165,7 +165,8 @@ static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key,
 {
        int err;
 
-       err = skb_mpls_push(skb, mpls->mpls_lse, mpls->mpls_ethertype);
+       err = skb_mpls_push(skb, mpls->mpls_lse, mpls->mpls_ethertype,
+                           skb->mac_len);
        if (err)
                return err;
 
@@ -178,7 +179,7 @@ static int pop_mpls(struct sk_buff *skb, struct sw_flow_key *key,
 {
        int err;
 
-       err = skb_mpls_pop(skb, ethertype);
+       err = skb_mpls_pop(skb, ethertype, skb->mac_len);
        if (err)
                return err;
 
index f30e406fbec5a321de0a489b2d41c39dd08fb561..d8c364d637b12d8ec6f1e30bb46a06b27cd06311 100644 (file)
@@ -1881,7 +1881,7 @@ static struct genl_family dp_datapath_genl_family __ro_after_init = {
 /* Called with ovs_mutex or RCU read lock. */
 static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
                                   struct net *net, u32 portid, u32 seq,
-                                  u32 flags, u8 cmd)
+                                  u32 flags, u8 cmd, gfp_t gfp)
 {
        struct ovs_header *ovs_header;
        struct ovs_vport_stats vport_stats;
@@ -1902,7 +1902,7 @@ static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
                goto nla_put_failure;
 
        if (!net_eq(net, dev_net(vport->dev))) {
-               int id = peernet2id_alloc(net, dev_net(vport->dev));
+               int id = peernet2id_alloc(net, dev_net(vport->dev), gfp);
 
                if (nla_put_s32(skb, OVS_VPORT_ATTR_NETNSID, id))
                        goto nla_put_failure;
@@ -1943,11 +1943,12 @@ struct sk_buff *ovs_vport_cmd_build_info(struct vport *vport, struct net *net,
        struct sk_buff *skb;
        int retval;
 
-       skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
+       skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!skb)
                return ERR_PTR(-ENOMEM);
 
-       retval = ovs_vport_cmd_fill_info(vport, skb, net, portid, seq, 0, cmd);
+       retval = ovs_vport_cmd_fill_info(vport, skb, net, portid, seq, 0, cmd,
+                                        GFP_KERNEL);
        BUG_ON(retval < 0);
 
        return skb;
@@ -2089,7 +2090,7 @@ restart:
 
        err = ovs_vport_cmd_fill_info(vport, reply, genl_info_net(info),
                                      info->snd_portid, info->snd_seq, 0,
-                                     OVS_VPORT_CMD_NEW);
+                                     OVS_VPORT_CMD_NEW, GFP_KERNEL);
 
        new_headroom = netdev_get_fwd_headroom(vport->dev);
 
@@ -2150,7 +2151,7 @@ static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
 
        err = ovs_vport_cmd_fill_info(vport, reply, genl_info_net(info),
                                      info->snd_portid, info->snd_seq, 0,
-                                     OVS_VPORT_CMD_SET);
+                                     OVS_VPORT_CMD_SET, GFP_KERNEL);
        BUG_ON(err < 0);
 
        ovs_unlock();
@@ -2190,7 +2191,7 @@ static int ovs_vport_cmd_del(struct sk_buff *skb, struct genl_info *info)
 
        err = ovs_vport_cmd_fill_info(vport, reply, genl_info_net(info),
                                      info->snd_portid, info->snd_seq, 0,
-                                     OVS_VPORT_CMD_DEL);
+                                     OVS_VPORT_CMD_DEL, GFP_KERNEL);
        BUG_ON(err < 0);
 
        /* the vport deletion may trigger dp headroom update */
@@ -2237,7 +2238,7 @@ static int ovs_vport_cmd_get(struct sk_buff *skb, struct genl_info *info)
                goto exit_unlock_free;
        err = ovs_vport_cmd_fill_info(vport, reply, genl_info_net(info),
                                      info->snd_portid, info->snd_seq, 0,
-                                     OVS_VPORT_CMD_GET);
+                                     OVS_VPORT_CMD_GET, GFP_ATOMIC);
        BUG_ON(err < 0);
        rcu_read_unlock();
 
@@ -2273,7 +2274,8 @@ static int ovs_vport_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
                                                    NETLINK_CB(cb->skb).portid,
                                                    cb->nlh->nlmsg_seq,
                                                    NLM_F_MULTI,
-                                                   OVS_VPORT_CMD_GET) < 0)
+                                                   OVS_VPORT_CMD_GET,
+                                                   GFP_ATOMIC) < 0)
                                goto out;
 
                        j++;
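The openvswitch hunks above thread a gfp_t down to ovs_vport_cmd_fill_info() so callers holding only the RCU read lock can pass GFP_ATOMIC while process-context callers pass GFP_KERNEL. A loose userspace sketch of letting the caller pick the allocation mode, with a hypothetical enum and helpers in place of gfp_t, is:

/* Loose sketch: thread the caller's allocation constraints down to the
 * helper instead of hard-coding one mode. Hypothetical enum and helpers;
 * the kernel passes gfp_t (GFP_KERNEL vs GFP_ATOMIC). */
#include <stdio.h>
#include <stdlib.h>

enum alloc_mode { MAY_SLEEP, NO_SLEEP };

static void *alloc_id(enum alloc_mode mode)
{
        /* In the kernel this is where GFP_KERNEL vs GFP_ATOMIC matters;
         * here both branches simply allocate. */
        (void)mode;
        return malloc(64);
}

static int fill_info(char *buf, size_t len, enum alloc_mode mode)
{
        void *id = alloc_id(mode);      /* mode chosen by the caller */

        if (!id)
                return -1;
        snprintf(buf, len, "netnsid ready");
        free(id);
        return 0;
}

int main(void)
{
        char buf[32];

        fill_info(buf, sizeof(buf), MAY_SLEEP); /* process context */
        fill_info(buf, sizeof(buf), NO_SLEEP);  /* under rcu_read_lock() */
        puts(buf);
        return 0;
}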
index d2437b5b2f6ad093c0738e48ebe9261ead757e13..58a7b8312c28992ad96827d1eabea443f098a448 100644 (file)
@@ -137,7 +137,7 @@ static void do_setup(struct net_device *netdev)
        netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_OPENVSWITCH |
                              IFF_NO_QUEUE;
        netdev->needs_free_netdev = true;
-       netdev->priv_destructor = internal_dev_destructor;
+       netdev->priv_destructor = NULL;
        netdev->ethtool_ops = &internal_dev_ethtool_ops;
        netdev->rtnl_link_ops = &internal_dev_link_ops;
 
@@ -159,7 +159,6 @@ static struct vport *internal_dev_create(const struct vport_parms *parms)
        struct internal_dev *internal_dev;
        struct net_device *dev;
        int err;
-       bool free_vport = true;
 
        vport = ovs_vport_alloc(0, &ovs_internal_vport_ops, parms);
        if (IS_ERR(vport)) {
@@ -190,10 +189,9 @@ static struct vport *internal_dev_create(const struct vport_parms *parms)
 
        rtnl_lock();
        err = register_netdevice(vport->dev);
-       if (err) {
-               free_vport = false;
+       if (err)
                goto error_unlock;
-       }
+       vport->dev->priv_destructor = internal_dev_destructor;
 
        dev_set_promiscuity(vport->dev, 1);
        rtnl_unlock();
@@ -207,8 +205,7 @@ error_unlock:
 error_free_netdev:
        free_netdev(dev);
 error_free_vport:
-       if (free_vport)
-               ovs_vport_free(vport);
+       ovs_vport_free(vport);
 error:
        return ERR_PTR(err);
 }
@@ -237,7 +234,7 @@ static netdev_tx_t internal_dev_recv(struct sk_buff *skb)
        }
 
        skb_dst_drop(skb);
-       nf_reset(skb);
+       nf_reset_ct(skb);
        secpath_reset(skb);
 
        skb->pkt_type = PACKET_HOST;
index e2742b006d255f598fc98953dbb823f615d2bf9a..82a50e850245ec56d258687f8f2dcdd411603df6 100644 (file)
@@ -1821,7 +1821,7 @@ static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev,
        skb_dst_drop(skb);
 
        /* drop conntrack reference */
-       nf_reset(skb);
+       nf_reset_ct(skb);
 
        spkt = &PACKET_SKB_CB(skb)->sa.pkt;
 
@@ -2121,7 +2121,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
        skb_dst_drop(skb);
 
        /* drop conntrack reference */
-       nf_reset(skb);
+       nf_reset_ct(skb);
 
        spin_lock(&sk->sk_receive_queue.lock);
        po->stats.stats1.tp_packets++;
index 96ea9f254ae90ae957f800566b05a7f75d13948f..76d499f6af9ab32aa17422ef911497585b6649b7 100644 (file)
@@ -338,9 +338,9 @@ static __poll_t pn_socket_poll(struct file *file, struct socket *sock,
 
        if (sk->sk_state == TCP_CLOSE)
                return EPOLLERR;
-       if (!skb_queue_empty(&sk->sk_receive_queue))
+       if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
                mask |= EPOLLIN | EPOLLRDNORM;
-       if (!skb_queue_empty(&pn->ctrlreq_queue))
+       if (!skb_queue_empty_lockless(&pn->ctrlreq_queue))
                mask |= EPOLLPRI;
        if (!mask && sk->sk_state == TCP_CLOSE_WAIT)
                return EPOLLHUP;
index 45acab2de0cf3e09b295d2faf7db3b3c78528fc5..9de2ae22d583136c0346dcfbb9951344d6645cc4 100644 (file)
@@ -143,6 +143,9 @@ static void rds_ib_add_one(struct ib_device *device)
        refcount_set(&rds_ibdev->refcount, 1);
        INIT_WORK(&rds_ibdev->free_work, rds_ib_dev_free);
 
+       INIT_LIST_HEAD(&rds_ibdev->ipaddr_list);
+       INIT_LIST_HEAD(&rds_ibdev->conn_list);
+
        rds_ibdev->max_wrs = device->attrs.max_qp_wr;
        rds_ibdev->max_sge = min(device->attrs.max_send_sge, RDS_IB_MAX_SGE);
 
@@ -203,9 +206,6 @@ static void rds_ib_add_one(struct ib_device *device)
                device->name,
                rds_ibdev->use_fastreg ? "FRMR" : "FMR");
 
-       INIT_LIST_HEAD(&rds_ibdev->ipaddr_list);
-       INIT_LIST_HEAD(&rds_ibdev->conn_list);
-
        down_write(&rds_ib_devices_lock);
        list_add_tail_rcu(&rds_ibdev->list, &rds_ib_devices);
        up_write(&rds_ib_devices_lock);
index f0e9ccf472a970004bd4ea1adb4d758b926b5837..6a0df7c8a939e4976aa2815127885e127dd864fc 100644 (file)
@@ -64,28 +64,6 @@ static const struct proto_ops rose_proto_ops;
 
 ax25_address rose_callsign;
 
-/*
- * ROSE network devices are virtual network devices encapsulating ROSE
- * frames into AX.25 which will be sent through an AX.25 device, so form a
- * special "super class" of normal net devices; split their locks off into a
- * separate class since they always nest.
- */
-static struct lock_class_key rose_netdev_xmit_lock_key;
-static struct lock_class_key rose_netdev_addr_lock_key;
-
-static void rose_set_lockdep_one(struct net_device *dev,
-                                struct netdev_queue *txq,
-                                void *_unused)
-{
-       lockdep_set_class(&txq->_xmit_lock, &rose_netdev_xmit_lock_key);
-}
-
-static void rose_set_lockdep_key(struct net_device *dev)
-{
-       lockdep_set_class(&dev->addr_list_lock, &rose_netdev_addr_lock_key);
-       netdev_for_each_tx_queue(dev, rose_set_lockdep_one, NULL);
-}
-
 /*
  *     Convert a ROSE address into text.
  */
@@ -1533,7 +1511,6 @@ static int __init rose_proto_init(void)
                        free_netdev(dev);
                        goto fail;
                }
-               rose_set_lockdep_key(dev);
                dev_rose[i] = dev;
        }
 
index 1091bf35a1992c8bdecbfcd2b8551f7316377133..7c7d10f2e0c181776611cd54d6048a17ed512d5f 100644 (file)
@@ -556,6 +556,7 @@ struct rxrpc_call {
        struct rxrpc_peer       *peer;          /* Peer record for remote address */
        struct rxrpc_sock __rcu *socket;        /* socket responsible */
        struct rxrpc_net        *rxnet;         /* Network namespace to which call belongs */
+       const struct rxrpc_security *security;  /* applied security module */
        struct mutex            user_mutex;     /* User access mutex */
        unsigned long           ack_at;         /* When deferred ACK needs to happen */
        unsigned long           ack_lost_at;    /* When ACK is figured as lost */
@@ -600,6 +601,7 @@ struct rxrpc_call {
        int                     debug_id;       /* debug ID for printks */
        unsigned short          rx_pkt_offset;  /* Current recvmsg packet offset */
        unsigned short          rx_pkt_len;     /* Current recvmsg packet len */
+       bool                    rx_pkt_last;    /* Current recvmsg packet is last */
 
        /* Rx/Tx circular buffer, depending on phase.
         *
index 00c095d74145ddd8349bcb2efde2ae9c1d295f50..135bf5cd8dd51be0414ebb89975331d3f47d7b4e 100644 (file)
@@ -84,7 +84,7 @@ static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
                smp_store_release(&b->conn_backlog_head,
                                  (head + 1) & (size - 1));
 
-               trace_rxrpc_conn(conn, rxrpc_conn_new_service,
+               trace_rxrpc_conn(conn->debug_id, rxrpc_conn_new_service,
                                 atomic_read(&conn->usage), here);
        }
 
@@ -97,7 +97,7 @@ static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
        call->flags |= (1 << RXRPC_CALL_IS_SERVICE);
        call->state = RXRPC_CALL_SERVER_PREALLOC;
 
-       trace_rxrpc_call(call, rxrpc_call_new_service,
+       trace_rxrpc_call(call->debug_id, rxrpc_call_new_service,
                         atomic_read(&call->usage),
                         here, (const void *)user_call_ID);
 
@@ -307,6 +307,7 @@ static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
 
        rxrpc_see_call(call);
        call->conn = conn;
+       call->security = conn->security;
        call->peer = rxrpc_get_peer(conn->params.peer);
        call->cong_cwnd = call->peer->cong_cwnd;
        return call;
index 32d8dc677142db1b1920878cccb3de9c95b7b767..a31c18c09894d5dd55e7baccacd3c07e18d3b5fc 100644 (file)
@@ -240,7 +240,8 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
        if (p->intr)
                __set_bit(RXRPC_CALL_IS_INTR, &call->flags);
        call->tx_total_len = p->tx_total_len;
-       trace_rxrpc_call(call, rxrpc_call_new_client, atomic_read(&call->usage),
+       trace_rxrpc_call(call->debug_id, rxrpc_call_new_client,
+                        atomic_read(&call->usage),
                         here, (const void *)p->user_call_ID);
 
        /* We need to protect a partially set up call against the user as we
@@ -290,8 +291,8 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
        if (ret < 0)
                goto error;
 
-       trace_rxrpc_call(call, rxrpc_call_connected, atomic_read(&call->usage),
-                        here, NULL);
+       trace_rxrpc_call(call->debug_id, rxrpc_call_connected,
+                        atomic_read(&call->usage), here, NULL);
 
        rxrpc_start_call_timer(call);
 
@@ -313,8 +314,8 @@ error_dup_user_ID:
 error:
        __rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR,
                                    RX_CALL_DEAD, ret);
-       trace_rxrpc_call(call, rxrpc_call_error, atomic_read(&call->usage),
-                        here, ERR_PTR(ret));
+       trace_rxrpc_call(call->debug_id, rxrpc_call_error,
+                        atomic_read(&call->usage), here, ERR_PTR(ret));
        rxrpc_release_call(rx, call);
        mutex_unlock(&call->user_mutex);
        rxrpc_put_call(call, rxrpc_call_put);
@@ -376,7 +377,8 @@ bool rxrpc_queue_call(struct rxrpc_call *call)
        if (n == 0)
                return false;
        if (rxrpc_queue_work(&call->processor))
-               trace_rxrpc_call(call, rxrpc_call_queued, n + 1, here, NULL);
+               trace_rxrpc_call(call->debug_id, rxrpc_call_queued, n + 1,
+                                here, NULL);
        else
                rxrpc_put_call(call, rxrpc_call_put_noqueue);
        return true;
@@ -391,7 +393,8 @@ bool __rxrpc_queue_call(struct rxrpc_call *call)
        int n = atomic_read(&call->usage);
        ASSERTCMP(n, >=, 1);
        if (rxrpc_queue_work(&call->processor))
-               trace_rxrpc_call(call, rxrpc_call_queued_ref, n, here, NULL);
+               trace_rxrpc_call(call->debug_id, rxrpc_call_queued_ref, n,
+                                here, NULL);
        else
                rxrpc_put_call(call, rxrpc_call_put_noqueue);
        return true;
@@ -406,7 +409,8 @@ void rxrpc_see_call(struct rxrpc_call *call)
        if (call) {
                int n = atomic_read(&call->usage);
 
-               trace_rxrpc_call(call, rxrpc_call_seen, n, here, NULL);
+               trace_rxrpc_call(call->debug_id, rxrpc_call_seen, n,
+                                here, NULL);
        }
 }
 
@@ -418,7 +422,7 @@ void rxrpc_get_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
        const void *here = __builtin_return_address(0);
        int n = atomic_inc_return(&call->usage);
 
-       trace_rxrpc_call(call, op, n, here, NULL);
+       trace_rxrpc_call(call->debug_id, op, n, here, NULL);
 }
 
 /*
@@ -445,7 +449,8 @@ void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call)
 
        _enter("{%d,%d}", call->debug_id, atomic_read(&call->usage));
 
-       trace_rxrpc_call(call, rxrpc_call_release, atomic_read(&call->usage),
+       trace_rxrpc_call(call->debug_id, rxrpc_call_release,
+                        atomic_read(&call->usage),
                         here, (const void *)call->flags);
 
        ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);
@@ -488,10 +493,10 @@ void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call)
 
        _debug("RELEASE CALL %p (%d CONN %p)", call, call->debug_id, conn);
 
-       if (conn) {
+       if (conn)
                rxrpc_disconnect_call(call);
-               conn->security->free_call_crypto(call);
-       }
+       if (call->security)
+               call->security->free_call_crypto(call);
 
        rxrpc_cleanup_ring(call);
        _leave("");
@@ -534,12 +539,13 @@ void rxrpc_put_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
 {
        struct rxrpc_net *rxnet = call->rxnet;
        const void *here = __builtin_return_address(0);
+       unsigned int debug_id = call->debug_id;
        int n;
 
        ASSERT(call != NULL);
 
        n = atomic_dec_return(&call->usage);
-       trace_rxrpc_call(call, op, n, here, NULL);
+       trace_rxrpc_call(debug_id, op, n, here, NULL);
        ASSERTCMP(n, >=, 0);
        if (n == 0) {
                _debug("call %d dead", call->debug_id);
index 3f1da1b49f690d0e3ccf6d93607cd2b0583d293b..376370cd9285252db965819427559ee07cc30c48 100644 (file)
@@ -212,7 +212,8 @@ rxrpc_alloc_client_connection(struct rxrpc_conn_parameters *cp, gfp_t gfp)
        rxrpc_get_local(conn->params.local);
        key_get(conn->params.key);
 
-       trace_rxrpc_conn(conn, rxrpc_conn_new_client, atomic_read(&conn->usage),
+       trace_rxrpc_conn(conn->debug_id, rxrpc_conn_new_client,
+                        atomic_read(&conn->usage),
                         __builtin_return_address(0));
        trace_rxrpc_client(conn, -1, rxrpc_client_alloc);
        _leave(" = %p", conn);
@@ -352,6 +353,7 @@ static int rxrpc_get_client_conn(struct rxrpc_sock *rx,
 
        if (cp->exclusive) {
                call->conn = candidate;
+               call->security = candidate->security;
                call->security_ix = candidate->security_ix;
                call->service_id = candidate->service_id;
                _leave(" = 0 [exclusive %d]", candidate->debug_id);
@@ -403,6 +405,7 @@ static int rxrpc_get_client_conn(struct rxrpc_sock *rx,
 candidate_published:
        set_bit(RXRPC_CONN_IN_CLIENT_CONNS, &candidate->flags);
        call->conn = candidate;
+       call->security = candidate->security;
        call->security_ix = candidate->security_ix;
        call->service_id = candidate->service_id;
        spin_unlock(&local->client_conns_lock);
@@ -425,6 +428,7 @@ found_extant_conn:
 
        spin_lock(&conn->channel_lock);
        call->conn = conn;
+       call->security = conn->security;
        call->security_ix = conn->security_ix;
        call->service_id = conn->service_id;
        list_add_tail(&call->chan_wait_link, &conn->waiting_calls);
@@ -985,11 +989,12 @@ rxrpc_put_one_client_conn(struct rxrpc_connection *conn)
 void rxrpc_put_client_conn(struct rxrpc_connection *conn)
 {
        const void *here = __builtin_return_address(0);
+       unsigned int debug_id = conn->debug_id;
        int n;
 
        do {
                n = atomic_dec_return(&conn->usage);
-               trace_rxrpc_conn(conn, rxrpc_conn_put_client, n, here);
+               trace_rxrpc_conn(debug_id, rxrpc_conn_put_client, n, here);
                if (n > 0)
                        return;
                ASSERTCMP(n, >=, 0);
index ed05b692213226e7494c3908caa4dcf44f39bf27..38d718e90dc69a35227ee80e9bf6704705a2e161 100644 (file)
@@ -269,7 +269,7 @@ bool rxrpc_queue_conn(struct rxrpc_connection *conn)
        if (n == 0)
                return false;
        if (rxrpc_queue_work(&conn->processor))
-               trace_rxrpc_conn(conn, rxrpc_conn_queued, n + 1, here);
+               trace_rxrpc_conn(conn->debug_id, rxrpc_conn_queued, n + 1, here);
        else
                rxrpc_put_connection(conn);
        return true;
@@ -284,7 +284,7 @@ void rxrpc_see_connection(struct rxrpc_connection *conn)
        if (conn) {
                int n = atomic_read(&conn->usage);
 
-               trace_rxrpc_conn(conn, rxrpc_conn_seen, n, here);
+               trace_rxrpc_conn(conn->debug_id, rxrpc_conn_seen, n, here);
        }
 }
 
@@ -296,7 +296,7 @@ void rxrpc_get_connection(struct rxrpc_connection *conn)
        const void *here = __builtin_return_address(0);
        int n = atomic_inc_return(&conn->usage);
 
-       trace_rxrpc_conn(conn, rxrpc_conn_got, n, here);
+       trace_rxrpc_conn(conn->debug_id, rxrpc_conn_got, n, here);
 }
 
 /*
@@ -310,7 +310,7 @@ rxrpc_get_connection_maybe(struct rxrpc_connection *conn)
        if (conn) {
                int n = atomic_fetch_add_unless(&conn->usage, 1, 0);
                if (n > 0)
-                       trace_rxrpc_conn(conn, rxrpc_conn_got, n + 1, here);
+                       trace_rxrpc_conn(conn->debug_id, rxrpc_conn_got, n + 1, here);
                else
                        conn = NULL;
        }
@@ -333,10 +333,11 @@ static void rxrpc_set_service_reap_timer(struct rxrpc_net *rxnet,
 void rxrpc_put_service_conn(struct rxrpc_connection *conn)
 {
        const void *here = __builtin_return_address(0);
+       unsigned int debug_id = conn->debug_id;
        int n;
 
        n = atomic_dec_return(&conn->usage);
-       trace_rxrpc_conn(conn, rxrpc_conn_put_service, n, here);
+       trace_rxrpc_conn(debug_id, rxrpc_conn_put_service, n, here);
        ASSERTCMP(n, >=, 0);
        if (n == 1)
                rxrpc_set_service_reap_timer(conn->params.local->rxnet,
@@ -420,7 +421,7 @@ void rxrpc_service_connection_reaper(struct work_struct *work)
                 */
                if (atomic_cmpxchg(&conn->usage, 1, 0) != 1)
                        continue;
-               trace_rxrpc_conn(conn, rxrpc_conn_reap_service, 0, NULL);
+               trace_rxrpc_conn(conn->debug_id, rxrpc_conn_reap_service, 0, NULL);
 
                if (rxrpc_conn_is_client(conn))
                        BUG();
index b30e13f6d95fdf77689cd6b1491b114ddb1dc9ed..123d6ceab15cb0b00ccf65aec61370b64cda811b 100644 (file)
@@ -134,7 +134,7 @@ struct rxrpc_connection *rxrpc_prealloc_service_connection(struct rxrpc_net *rxn
                list_add_tail(&conn->proc_link, &rxnet->conn_proc_list);
                write_unlock(&rxnet->conn_lock);
 
-               trace_rxrpc_conn(conn, rxrpc_conn_new_service,
+               trace_rxrpc_conn(conn->debug_id, rxrpc_conn_new_service,
                                 atomic_read(&conn->usage),
                                 __builtin_return_address(0));
        }
index c97ebdc043e44525eaecdd54bc447c1895bdca74..48f67a9b1037ceb7b7a749a241bbb127a12a1f67 100644 (file)
@@ -147,10 +147,16 @@ void rxrpc_error_report(struct sock *sk)
 {
        struct sock_exterr_skb *serr;
        struct sockaddr_rxrpc srx;
-       struct rxrpc_local *local = sk->sk_user_data;
+       struct rxrpc_local *local;
        struct rxrpc_peer *peer;
        struct sk_buff *skb;
 
+       rcu_read_lock();
+       local = rcu_dereference_sk_user_data(sk);
+       if (unlikely(!local)) {
+               rcu_read_unlock();
+               return;
+       }
        _enter("%p{%d}", sk, local->debug_id);
 
        /* Clear the outstanding error value on the socket so that it doesn't
@@ -160,6 +166,7 @@ void rxrpc_error_report(struct sock *sk)
 
        skb = sock_dequeue_err_skb(sk);
        if (!skb) {
+               rcu_read_unlock();
                _leave("UDP socket errqueue empty");
                return;
        }
@@ -167,11 +174,11 @@ void rxrpc_error_report(struct sock *sk)
        serr = SKB_EXT_ERR(skb);
        if (!skb->len && serr->ee.ee_origin == SO_EE_ORIGIN_TIMESTAMPING) {
                _leave("UDP empty message");
+               rcu_read_unlock();
                rxrpc_free_skb(skb, rxrpc_skb_freed);
                return;
        }
 
-       rcu_read_lock();
        peer = rxrpc_lookup_peer_icmp_rcu(local, skb, &srx);
        if (peer && !rxrpc_get_peer_maybe(peer))
                peer = NULL;
index 9c3ac96f71cbf8202ccdeca3af388ad8a2ae08b3..64830d8c1fdb5cf21497e89b812a18aa5fb9c143 100644 (file)
@@ -216,7 +216,7 @@ struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *local, gfp_t gfp)
        peer = kzalloc(sizeof(struct rxrpc_peer), gfp);
        if (peer) {
                atomic_set(&peer->usage, 1);
-               peer->local = local;
+               peer->local = rxrpc_get_local(local);
                INIT_HLIST_HEAD(&peer->error_targets);
                peer->service_conns = RB_ROOT;
                seqlock_init(&peer->service_conn_lock);
@@ -307,7 +307,6 @@ void rxrpc_new_incoming_peer(struct rxrpc_sock *rx, struct rxrpc_local *local,
        unsigned long hash_key;
 
        hash_key = rxrpc_peer_hash_key(local, &peer->srx);
-       peer->local = local;
        rxrpc_init_peer(rx, peer, hash_key);
 
        spin_lock(&rxnet->peer_hash_lock);
@@ -382,7 +381,7 @@ struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *peer)
        int n;
 
        n = atomic_inc_return(&peer->usage);
-       trace_rxrpc_peer(peer, rxrpc_peer_got, n, here);
+       trace_rxrpc_peer(peer->debug_id, rxrpc_peer_got, n, here);
        return peer;
 }
 
@@ -396,7 +395,7 @@ struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *peer)
        if (peer) {
                int n = atomic_fetch_add_unless(&peer->usage, 1, 0);
                if (n > 0)
-                       trace_rxrpc_peer(peer, rxrpc_peer_got, n + 1, here);
+                       trace_rxrpc_peer(peer->debug_id, rxrpc_peer_got, n + 1, here);
                else
                        peer = NULL;
        }
@@ -417,6 +416,7 @@ static void __rxrpc_put_peer(struct rxrpc_peer *peer)
        list_del_init(&peer->keepalive_link);
        spin_unlock_bh(&rxnet->peer_hash_lock);
 
+       rxrpc_put_local(peer->local);
        kfree_rcu(peer, rcu);
 }
 
@@ -426,11 +426,13 @@ static void __rxrpc_put_peer(struct rxrpc_peer *peer)
 void rxrpc_put_peer(struct rxrpc_peer *peer)
 {
        const void *here = __builtin_return_address(0);
+       unsigned int debug_id;
        int n;
 
        if (peer) {
+               debug_id = peer->debug_id;
                n = atomic_dec_return(&peer->usage);
-               trace_rxrpc_peer(peer, rxrpc_peer_put, n, here);
+               trace_rxrpc_peer(debug_id, rxrpc_peer_put, n, here);
                if (n == 0)
                        __rxrpc_put_peer(peer);
        }
@@ -443,13 +445,15 @@ void rxrpc_put_peer(struct rxrpc_peer *peer)
 void rxrpc_put_peer_locked(struct rxrpc_peer *peer)
 {
        const void *here = __builtin_return_address(0);
+       unsigned int debug_id = peer->debug_id;
        int n;
 
        n = atomic_dec_return(&peer->usage);
-       trace_rxrpc_peer(peer, rxrpc_peer_put, n, here);
+       trace_rxrpc_peer(debug_id, rxrpc_peer_put, n, here);
        if (n == 0) {
                hash_del_rcu(&peer->hash_link);
                list_del_init(&peer->keepalive_link);
+               rxrpc_put_local(peer->local);
                kfree_rcu(peer, rcu);
        }
 }
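Throughout the rxrpc hunks, the tracepoints switch from taking the object pointer to taking a debug_id snapshotted before the final atomic_dec_return(), so the trace call can never dereference an object that another CPU has already freed. A minimal sketch of that ordering, with hypothetical trace_put()/put_obj() helpers, is:

/* Sketch: snapshot the identifier before dropping the last reference,
 * then trace using the snapshot only. Hypothetical helpers. */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
        unsigned int debug_id;
        atomic_int usage;
};

static void trace_put(unsigned int debug_id, int n)
{
        printf("put obj %u, usage now %d\n", debug_id, n);
}

static void put_obj(struct obj *o)
{
        unsigned int debug_id = o->debug_id;    /* capture before the dec */
        int n = atomic_fetch_sub(&o->usage, 1) - 1;

        trace_put(debug_id, n);                 /* never touches *o again */
        if (n == 0)
                free(o);
}

int main(void)
{
        struct obj *o = malloc(sizeof(*o));

        o->debug_id = 42;
        atomic_init(&o->usage, 1);
        put_obj(o);
        return 0;
}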
index 3b0becb1204156eff6753f2399ec09c40fd233b0..8578c39ec83959b99dc50fb00fb70968e09ad18e 100644 (file)
@@ -251,8 +251,8 @@ static int rxrpc_verify_packet(struct rxrpc_call *call, struct sk_buff *skb,
                seq += subpacket;
        }
 
-       return call->conn->security->verify_packet(call, skb, offset, len,
-                                                  seq, cksum);
+       return call->security->verify_packet(call, skb, offset, len,
+                                            seq, cksum);
 }
 
 /*
@@ -267,11 +267,13 @@ static int rxrpc_verify_packet(struct rxrpc_call *call, struct sk_buff *skb,
  */
 static int rxrpc_locate_data(struct rxrpc_call *call, struct sk_buff *skb,
                             u8 *_annotation,
-                            unsigned int *_offset, unsigned int *_len)
+                            unsigned int *_offset, unsigned int *_len,
+                            bool *_last)
 {
        struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
        unsigned int offset = sizeof(struct rxrpc_wire_header);
        unsigned int len;
+       bool last = false;
        int ret;
        u8 annotation = *_annotation;
        u8 subpacket = annotation & RXRPC_RX_ANNO_SUBPACKET;
@@ -281,6 +283,8 @@ static int rxrpc_locate_data(struct rxrpc_call *call, struct sk_buff *skb,
        len = skb->len - offset;
        if (subpacket < sp->nr_subpackets - 1)
                len = RXRPC_JUMBO_DATALEN;
+       else if (sp->rx_flags & RXRPC_SKB_INCL_LAST)
+               last = true;
 
        if (!(annotation & RXRPC_RX_ANNO_VERIFIED)) {
                ret = rxrpc_verify_packet(call, skb, annotation, offset, len);
@@ -291,7 +295,8 @@ static int rxrpc_locate_data(struct rxrpc_call *call, struct sk_buff *skb,
 
        *_offset = offset;
        *_len = len;
-       call->conn->security->locate_data(call, skb, _offset, _len);
+       *_last = last;
+       call->security->locate_data(call, skb, _offset, _len);
        return 0;
 }
 
@@ -309,7 +314,7 @@ static int rxrpc_recvmsg_data(struct socket *sock, struct rxrpc_call *call,
        rxrpc_serial_t serial;
        rxrpc_seq_t hard_ack, top, seq;
        size_t remain;
-       bool last;
+       bool rx_pkt_last;
        unsigned int rx_pkt_offset, rx_pkt_len;
        int ix, copy, ret = -EAGAIN, ret2;
 
@@ -319,6 +324,7 @@ static int rxrpc_recvmsg_data(struct socket *sock, struct rxrpc_call *call,
 
        rx_pkt_offset = call->rx_pkt_offset;
        rx_pkt_len = call->rx_pkt_len;
+       rx_pkt_last = call->rx_pkt_last;
 
        if (call->state >= RXRPC_CALL_SERVER_ACK_REQUEST) {
                seq = call->rx_hard_ack;
@@ -329,6 +335,7 @@ static int rxrpc_recvmsg_data(struct socket *sock, struct rxrpc_call *call,
        /* Barriers against rxrpc_input_data(). */
        hard_ack = call->rx_hard_ack;
        seq = hard_ack + 1;
+
        while (top = smp_load_acquire(&call->rx_top),
               before_eq(seq, top)
               ) {
@@ -356,7 +363,8 @@ static int rxrpc_recvmsg_data(struct socket *sock, struct rxrpc_call *call,
                if (rx_pkt_offset == 0) {
                        ret2 = rxrpc_locate_data(call, skb,
                                                 &call->rxtx_annotations[ix],
-                                                &rx_pkt_offset, &rx_pkt_len);
+                                                &rx_pkt_offset, &rx_pkt_len,
+                                                &rx_pkt_last);
                        trace_rxrpc_recvmsg(call, rxrpc_recvmsg_next, seq,
                                            rx_pkt_offset, rx_pkt_len, ret2);
                        if (ret2 < 0) {
@@ -396,13 +404,12 @@ static int rxrpc_recvmsg_data(struct socket *sock, struct rxrpc_call *call,
                }
 
                /* The whole packet has been transferred. */
-               last = sp->hdr.flags & RXRPC_LAST_PACKET;
                if (!(flags & MSG_PEEK))
                        rxrpc_rotate_rx_window(call);
                rx_pkt_offset = 0;
                rx_pkt_len = 0;
 
-               if (last) {
+               if (rx_pkt_last) {
                        ASSERTCMP(seq, ==, READ_ONCE(call->rx_top));
                        ret = 1;
                        goto out;
@@ -415,6 +422,7 @@ out:
        if (!(flags & MSG_PEEK)) {
                call->rx_pkt_offset = rx_pkt_offset;
                call->rx_pkt_len = rx_pkt_len;
+               call->rx_pkt_last = rx_pkt_last;
        }
 done:
        trace_rxrpc_recvmsg(call, rxrpc_recvmsg_data_return, seq,
index 6a1547b270fef519fc5c334ced74b728f6484d28..813fd68881429a8d8f029c2e819393f1f4d21b70 100644 (file)
@@ -419,7 +419,7 @@ static int rxrpc_send_data(struct rxrpc_sock *rx,
                                 call->tx_winsize)
                                sp->hdr.flags |= RXRPC_MORE_PACKETS;
 
-                       ret = conn->security->secure_packet(
+                       ret = call->security->secure_packet(
                                call, skb, skb->mark, skb->head);
                        if (ret < 0)
                                goto out;
@@ -661,6 +661,7 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
                case RXRPC_CALL_SERVER_PREALLOC:
                case RXRPC_CALL_SERVER_SECURING:
                case RXRPC_CALL_SERVER_ACCEPTING:
+                       rxrpc_put_call(call, rxrpc_call_put);
                        ret = -EBUSY;
                        goto error_release_sock;
                default:
index 2558f00f6b3eda64a87f63c32592c15b7a0a2bfe..69d4676a402f54ac0d03999b5c73e572c1f06447 100644 (file)
@@ -832,8 +832,7 @@ static struct tc_cookie *nla_memdup_cookie(struct nlattr **tb)
 }
 
 static const struct nla_policy tcf_action_policy[TCA_ACT_MAX + 1] = {
-       [TCA_ACT_KIND]          = { .type = NLA_NUL_STRING,
-                                   .len = IFNAMSIZ - 1 },
+       [TCA_ACT_KIND]          = { .type = NLA_STRING },
        [TCA_ACT_INDEX]         = { .type = NLA_U32 },
        [TCA_ACT_COOKIE]        = { .type = NLA_BINARY,
                                    .len = TC_COOKIE_MAX_SIZE },
@@ -865,8 +864,10 @@ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
                        NL_SET_ERR_MSG(extack, "TC action kind must be specified");
                        goto err_out;
                }
-               nla_strlcpy(act_name, kind, IFNAMSIZ);
-
+               if (nla_strlcpy(act_name, kind, IFNAMSIZ) >= IFNAMSIZ) {
+                       NL_SET_ERR_MSG(extack, "TC action name too long");
+                       goto err_out;
+               }
                if (tb[TCA_ACT_COOKIE]) {
                        cookie = nla_memdup_cookie(tb);
                        if (!cookie) {
@@ -1352,11 +1353,16 @@ static int tcf_action_add(struct net *net, struct nlattr *nla,
                          struct netlink_ext_ack *extack)
 {
        size_t attr_size = 0;
-       int ret = 0;
+       int loop, ret;
        struct tc_action *actions[TCA_ACT_MAX_PRIO] = {};
 
-       ret = tcf_action_init(net, NULL, nla, NULL, NULL, ovr, 0, actions,
-                             &attr_size, true, extack);
+       for (loop = 0; loop < 10; loop++) {
+               ret = tcf_action_init(net, NULL, nla, NULL, NULL, ovr, 0,
+                                     actions, &attr_size, true, extack);
+               if (ret != -EAGAIN)
+                       break;
+       }
+
        if (ret < 0)
                return ret;
        ret = tcf_add_notify(net, n, actions, portid, attr_size, extack);
@@ -1406,11 +1412,8 @@ static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n,
                 */
                if (n->nlmsg_flags & NLM_F_REPLACE)
                        ovr = 1;
-replay:
                ret = tcf_action_add(net, tca[TCA_ACT_TAB], n, portid, ovr,
                                     extack);
-               if (ret == -EAGAIN)
-                       goto replay;
                break;
        case RTM_DELACTION:
                ret = tca_action_gd(net, tca[TCA_ACT_TAB], n,
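Both the action path above and the filter paths further down now copy the user-supplied kind string with nla_strlcpy() and treat a return value of IFNAMSIZ or more as "name too long" rather than silently truncating it. The same guard sketched in plain C, with snprintf() standing in for nla_strlcpy() (both report the untruncated source length), is:

/* Sketch: reject over-long names instead of truncating them. */
#include <errno.h>
#include <stdio.h>

#define IFNAMSIZ 16

static int parse_kind(const char *user_kind, char *name)
{
        if (snprintf(name, IFNAMSIZ, "%s", user_kind) >= IFNAMSIZ)
                return -EINVAL;         /* would have been truncated */
        return 0;
}

int main(void)
{
        char name[IFNAMSIZ];

        printf("%d\n", parse_kind("mirred", name));
        printf("%d\n", parse_kind("a-ridiculously-long-action-kind", name));
        return 0;
}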
index 9ce073a05414ae3289190446cb6cee057588f07d..08923b21e56607f4b163dafa4e2f97d348a34061 100644 (file)
@@ -484,7 +484,11 @@ static int __init mirred_init_module(void)
                return err;
 
        pr_info("Mirror/redirect action on\n");
-       return tcf_register_action(&act_mirred_ops, &mirred_net_ops);
+       err = tcf_register_action(&act_mirred_ops, &mirred_net_ops);
+       if (err)
+               unregister_netdevice_notifier(&mirred_device_notifier);
+
+       return err;
 }
 
 static void __exit mirred_cleanup_module(void)
index e168df0e008acef81e579900602138b5da683111..4cf6c553bb0bbf06376d4068c25775c1948cf065 100644 (file)
@@ -55,7 +55,7 @@ static int tcf_mpls_act(struct sk_buff *skb, const struct tc_action *a,
        struct tcf_mpls *m = to_mpls(a);
        struct tcf_mpls_params *p;
        __be32 new_lse;
-       int ret;
+       int ret, mac_len;
 
        tcf_lastuse_update(&m->tcf_tm);
        bstats_cpu_update(this_cpu_ptr(m->common.cpu_bstats), skb);
@@ -63,8 +63,12 @@ static int tcf_mpls_act(struct sk_buff *skb, const struct tc_action *a,
        /* Ensure 'data' points at mac_header prior calling mpls manipulating
         * functions.
         */
-       if (skb_at_tc_ingress(skb))
+       if (skb_at_tc_ingress(skb)) {
                skb_push_rcsum(skb, skb->mac_len);
+               mac_len = skb->mac_len;
+       } else {
+               mac_len = skb_network_header(skb) - skb_mac_header(skb);
+       }
 
        ret = READ_ONCE(m->tcf_action);
 
@@ -72,12 +76,12 @@ static int tcf_mpls_act(struct sk_buff *skb, const struct tc_action *a,
 
        switch (p->tcfm_action) {
        case TCA_MPLS_ACT_POP:
-               if (skb_mpls_pop(skb, p->tcfm_proto))
+               if (skb_mpls_pop(skb, p->tcfm_proto, mac_len))
                        goto drop;
                break;
        case TCA_MPLS_ACT_PUSH:
                new_lse = tcf_mpls_get_lse(NULL, p, !eth_p_mpls(skb->protocol));
-               if (skb_mpls_push(skb, new_lse, p->tcfm_proto))
+               if (skb_mpls_push(skb, new_lse, p->tcfm_proto, mac_len))
                        goto drop;
                break;
        case TCA_MPLS_ACT_MODIFY:
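In tcf_mpls_act() above, the MAC header length comes from skb->mac_len at ingress, where the data pointer was just pushed back to the MAC header, but is recomputed as network_header minus mac_header at egress. A small sketch of that distinction, with hypothetical offset fields in place of the skb accessors, is:

/* Sketch: the MAC header length handed to MPLS push/pop depends on where
 * in the stack the action runs. Hypothetical packet layout fields. */
#include <stdio.h>

struct pkt {
        int mac_header;         /* offset of the Ethernet header */
        int network_header;     /* offset of the L3 header */
        int mac_len;            /* cached MAC header length (valid at ingress) */
};

static int mpls_mac_len(const struct pkt *p, int at_ingress)
{
        if (at_ingress)
                return p->mac_len;                      /* cached value is valid */
        return p->network_header - p->mac_header;       /* recompute at egress */
}

int main(void)
{
        /* 18 vs 14 models, e.g., a VLAN tag present on the egress path. */
        struct pkt p = { .mac_header = 0, .network_header = 18, .mac_len = 14 };

        printf("ingress mac_len=%d egress mac_len=%d\n",
               mpls_mac_len(&p, 1), mpls_mac_len(&p, 0));
        return 0;
}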
index 64584a1df42548427014d98c4c8e088aa5351a6f..8717c0b26c905457e5b220465cceb2ffae71e561 100644 (file)
@@ -162,11 +162,22 @@ static inline u32 tcf_auto_prio(struct tcf_proto *tp)
        return TC_H_MAJ(first);
 }
 
+static bool tcf_proto_check_kind(struct nlattr *kind, char *name)
+{
+       if (kind)
+               return nla_strlcpy(name, kind, IFNAMSIZ) >= IFNAMSIZ;
+       memset(name, 0, IFNAMSIZ);
+       return false;
+}
+
 static bool tcf_proto_is_unlocked(const char *kind)
 {
        const struct tcf_proto_ops *ops;
        bool ret;
 
+       if (strlen(kind) == 0)
+               return false;
+
        ops = tcf_proto_lookup_ops(kind, false, NULL);
        /* On error return false to take rtnl lock. Proto lookup/create
         * functions will perform lookup again and properly handle errors.
@@ -1843,6 +1854,7 @@ static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
 {
        struct net *net = sock_net(skb->sk);
        struct nlattr *tca[TCA_MAX + 1];
+       char name[IFNAMSIZ];
        struct tcmsg *t;
        u32 protocol;
        u32 prio;
@@ -1899,13 +1911,19 @@ replay:
        if (err)
                return err;
 
+       if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
+               NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
+               err = -EINVAL;
+               goto errout;
+       }
+
        /* Take rtnl mutex if rtnl_held was set to true on previous iteration,
         * block is shared (no qdisc found), qdisc is not unlocked, classifier
         * type is not specified, classifier is not unlocked.
         */
        if (rtnl_held ||
            (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
-           !tca[TCA_KIND] || !tcf_proto_is_unlocked(nla_data(tca[TCA_KIND]))) {
+           !tcf_proto_is_unlocked(name)) {
                rtnl_held = true;
                rtnl_lock();
        }
@@ -2063,6 +2081,7 @@ static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
 {
        struct net *net = sock_net(skb->sk);
        struct nlattr *tca[TCA_MAX + 1];
+       char name[IFNAMSIZ];
        struct tcmsg *t;
        u32 protocol;
        u32 prio;
@@ -2102,13 +2121,18 @@ static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
        if (err)
                return err;
 
+       if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
+               NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
+               err = -EINVAL;
+               goto errout;
+       }
        /* Take rtnl mutex if flushing whole chain, block is shared (no qdisc
         * found), qdisc is not unlocked, classifier type is not specified,
         * classifier is not unlocked.
         */
        if (!prio ||
            (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
-           !tca[TCA_KIND] || !tcf_proto_is_unlocked(nla_data(tca[TCA_KIND]))) {
+           !tcf_proto_is_unlocked(name)) {
                rtnl_held = true;
                rtnl_lock();
        }
@@ -2216,6 +2240,7 @@ static int tc_get_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
 {
        struct net *net = sock_net(skb->sk);
        struct nlattr *tca[TCA_MAX + 1];
+       char name[IFNAMSIZ];
        struct tcmsg *t;
        u32 protocol;
        u32 prio;
@@ -2252,12 +2277,17 @@ static int tc_get_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
        if (err)
                return err;
 
+       if (tcf_proto_check_kind(tca[TCA_KIND], name)) {
+               NL_SET_ERR_MSG(extack, "Specified TC filter name too long");
+               err = -EINVAL;
+               goto errout;
+       }
        /* Take rtnl mutex if block is shared (no qdisc found), qdisc is not
         * unlocked, classifier type is not specified, classifier is not
         * unlocked.
         */
        if ((q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
-           !tca[TCA_KIND] || !tcf_proto_is_unlocked(nla_data(tca[TCA_KIND]))) {
+           !tcf_proto_is_unlocked(name)) {
                rtnl_held = true;
                rtnl_lock();
        }
index bf10bdaf5012734012b34a30e0d31faebcbeed21..8229ed4a67bee6eaa46cd365f202b6e698b1d7a9 100644 (file)
@@ -162,16 +162,20 @@ static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog,
        cls_bpf.name = obj->bpf_name;
        cls_bpf.exts_integrated = obj->exts_integrated;
 
-       if (oldprog)
+       if (oldprog && prog)
                err = tc_setup_cb_replace(block, tp, TC_SETUP_CLSBPF, &cls_bpf,
                                          skip_sw, &oldprog->gen_flags,
                                          &oldprog->in_hw_count,
                                          &prog->gen_flags, &prog->in_hw_count,
                                          true);
-       else
+       else if (prog)
                err = tc_setup_cb_add(block, tp, TC_SETUP_CLSBPF, &cls_bpf,
                                      skip_sw, &prog->gen_flags,
                                      &prog->in_hw_count, true);
+       else
+               err = tc_setup_cb_destroy(block, tp, TC_SETUP_CLSBPF, &cls_bpf,
+                                         skip_sw, &oldprog->gen_flags,
+                                         &oldprog->in_hw_count, true);
 
        if (prog && err) {
                cls_bpf_offload_cmd(tp, oldprog, prog, extack);
index 82bd14e7ac93dc709483b3437cdc1779b34d0888..3177dcb173161629a801278db38fabeb6fcdbdd9 100644 (file)
@@ -446,7 +446,7 @@ META_COLLECTOR(int_sk_wmem_queued)
                *err = -1;
                return;
        }
-       dst->value = sk->sk_wmem_queued;
+       dst->value = READ_ONCE(sk->sk_wmem_queued);
 }
 
 META_COLLECTOR(int_sk_fwd_alloc)
@@ -554,7 +554,7 @@ META_COLLECTOR(int_sk_rcvlowat)
                *err = -1;
                return;
        }
-       dst->value = sk->sk_rcvlowat;
+       dst->value = READ_ONCE(sk->sk_rcvlowat);
 }
 
 META_COLLECTOR(int_sk_rcvtimeo)
index 81d58b28061221f6af6744080187489f674386ed..1047825d9f48d546fe1339a25f3f86979e7ac801 100644 (file)
@@ -1390,8 +1390,7 @@ check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w)
 }
 
 const struct nla_policy rtm_tca_policy[TCA_MAX + 1] = {
-       [TCA_KIND]              = { .type = NLA_NUL_STRING,
-                                   .len = IFNAMSIZ - 1 },
+       [TCA_KIND]              = { .type = NLA_STRING },
        [TCA_RATE]              = { .type = NLA_BINARY,
                                    .len = sizeof(struct tc_estimator) },
        [TCA_STAB]              = { .type = NLA_NESTED },
index 06c7a2da21bc20e8f7e6ad02da0b0b3e3d933928..39b427dc751282db7adb2d0803eecccb0457c316 100644 (file)
@@ -1127,6 +1127,33 @@ static const struct nla_policy cbq_policy[TCA_CBQ_MAX + 1] = {
        [TCA_CBQ_POLICE]        = { .len = sizeof(struct tc_cbq_police) },
 };
 
+static int cbq_opt_parse(struct nlattr *tb[TCA_CBQ_MAX + 1],
+                        struct nlattr *opt,
+                        struct netlink_ext_ack *extack)
+{
+       int err;
+
+       if (!opt) {
+               NL_SET_ERR_MSG(extack, "CBQ options are required for this operation");
+               return -EINVAL;
+       }
+
+       err = nla_parse_nested_deprecated(tb, TCA_CBQ_MAX, opt,
+                                         cbq_policy, extack);
+       if (err < 0)
+               return err;
+
+       if (tb[TCA_CBQ_WRROPT]) {
+               const struct tc_cbq_wrropt *wrr = nla_data(tb[TCA_CBQ_WRROPT]);
+
+               if (wrr->priority > TC_CBQ_MAXPRIO) {
+                       NL_SET_ERR_MSG(extack, "priority is bigger than TC_CBQ_MAXPRIO");
+                       err = -EINVAL;
+               }
+       }
+       return err;
+}
+
 static int cbq_init(struct Qdisc *sch, struct nlattr *opt,
                    struct netlink_ext_ack *extack)
 {
@@ -1139,13 +1166,7 @@ static int cbq_init(struct Qdisc *sch, struct nlattr *opt,
        hrtimer_init(&q->delay_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
        q->delay_timer.function = cbq_undelay;
 
-       if (!opt) {
-               NL_SET_ERR_MSG(extack, "CBQ options are required for this operation");
-               return -EINVAL;
-       }
-
-       err = nla_parse_nested_deprecated(tb, TCA_CBQ_MAX, opt, cbq_policy,
-                                         extack);
+       err = cbq_opt_parse(tb, opt, extack);
        if (err < 0)
                return err;
 
@@ -1464,13 +1485,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
        struct cbq_class *parent;
        struct qdisc_rate_table *rtab = NULL;
 
-       if (!opt) {
-               NL_SET_ERR_MSG(extack, "Mandatory qdisc options missing");
-               return -EINVAL;
-       }
-
-       err = nla_parse_nested_deprecated(tb, TCA_CBQ_MAX, opt, cbq_policy,
-                                         extack);
+       err = cbq_opt_parse(tb, opt, extack);
        if (err < 0)
                return err;
 
index 1bef152c5721d1f3dccc24251c60a902f7223569..b2905b03a432f8fb46f06d939c847926171f79c3 100644 (file)
@@ -306,7 +306,7 @@ static void cbs_set_port_rate(struct net_device *dev, struct cbs_sched_data *q)
        if (err < 0)
                goto skip;
 
-       if (ecmd.base.speed != SPEED_UNKNOWN)
+       if (ecmd.base.speed && ecmd.base.speed != SPEED_UNKNOWN)
                speed = ecmd.base.speed;
 
 skip:
index bad1cbe59a562799b8e5c1b1085616fe67d4edd9..05605b30bef3abac1da2e0e821c871a54b3635ba 100644 (file)
@@ -361,6 +361,8 @@ static int dsmark_init(struct Qdisc *sch, struct nlattr *opt,
                goto errout;
 
        err = -EINVAL;
+       if (!tb[TCA_DSMARK_INDICES])
+               goto errout;
        indices = nla_get_u16(tb[TCA_DSMARK_INDICES]);
 
        if (hweight32(indices) != 1)
index cebfb65d85568c3afa48c52d1f05887fd59b937b..b1da5589a0c6a2c9db9912430dca3d5f06fff10c 100644 (file)
@@ -177,7 +177,7 @@ static int etf_enqueue_timesortedlist(struct sk_buff *nskb, struct Qdisc *sch,
 
                parent = *p;
                skb = rb_to_skb(parent);
-               if (ktime_after(txtime, skb->tstamp)) {
+               if (ktime_compare(txtime, skb->tstamp) >= 0) {
                        p = &parent->rb_right;
                        leftmost = false;
                } else {
index 17bd8f539bc7f1d596e97c713467f953802c9b82..8769b4b8807d9d6638619059230462750bdff2ee 100644 (file)
@@ -799,9 +799,6 @@ struct Qdisc_ops pfifo_fast_ops __read_mostly = {
 };
 EXPORT_SYMBOL(pfifo_fast_ops);
 
-static struct lock_class_key qdisc_tx_busylock;
-static struct lock_class_key qdisc_running_key;
-
 struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
                          const struct Qdisc_ops *ops,
                          struct netlink_ext_ack *extack)
@@ -854,17 +851,9 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
        }
 
        spin_lock_init(&sch->busylock);
-       lockdep_set_class(&sch->busylock,
-                         dev->qdisc_tx_busylock ?: &qdisc_tx_busylock);
-
        /* seqlock has the same scope of busylock, for NOLOCK qdisc */
        spin_lock_init(&sch->seqlock);
-       lockdep_set_class(&sch->busylock,
-                         dev->qdisc_tx_busylock ?: &qdisc_tx_busylock);
-
        seqcount_init(&sch->running);
-       lockdep_set_class(&sch->running,
-                         dev->qdisc_running_key ?: &qdisc_running_key);
 
        sch->ops = ops;
        sch->flags = ops->static_flags;
@@ -875,6 +864,12 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
        dev_hold(dev);
        refcount_set(&sch->refcnt, 1);
 
+       if (sch != &noop_qdisc) {
+               lockdep_set_class(&sch->busylock, &dev->qdisc_tx_busylock_key);
+               lockdep_set_class(&sch->seqlock, &dev->qdisc_tx_busylock_key);
+               lockdep_set_class(&sch->running, &dev->qdisc_running_key);
+       }
+
        return sch;
 errout1:
        kfree(p);
@@ -1043,6 +1038,8 @@ static void attach_one_default_qdisc(struct net_device *dev,
 
        if (dev->priv_flags & IFF_NO_QUEUE)
                ops = &noqueue_qdisc_ops;
+       else if (dev->type == ARPHRD_CAN)
+               ops = &pfifo_fast_ops;
 
        qdisc = qdisc_create_dflt(dev_queue, ops, TC_H_ROOT, NULL);
        if (!qdisc) {
index 23cd1c873a2cfc24b907a8177e71dd68b0939701..be35f03b657b1eb10d224594981aa44e1776801c 100644 (file)
@@ -5,11 +5,11 @@
  * Copyright (C) 2013 Nandita Dukkipati <nanditad@google.com>
  */
 
-#include <linux/jhash.h>
 #include <linux/jiffies.h>
 #include <linux/module.h>
 #include <linux/skbuff.h>
 #include <linux/vmalloc.h>
+#include <linux/siphash.h>
 #include <net/pkt_sched.h>
 #include <net/sock.h>
 
@@ -126,7 +126,7 @@ struct wdrr_bucket {
 
 struct hhf_sched_data {
        struct wdrr_bucket buckets[WDRR_BUCKET_CNT];
-       u32                perturbation;   /* hash perturbation */
+       siphash_key_t      perturbation;   /* hash perturbation */
        u32                quantum;        /* psched_mtu(qdisc_dev(sch)); */
        u32                drop_overlimit; /* number of times max qdisc packet
                                            * limit was hit
@@ -264,7 +264,7 @@ static enum wdrr_bucket_idx hhf_classify(struct sk_buff *skb, struct Qdisc *sch)
        }
 
        /* Get hashed flow-id of the skb. */
-       hash = skb_get_hash_perturb(skb, q->perturbation);
+       hash = skb_get_hash_perturb(skb, &q->perturbation);
 
        /* Check if this packet belongs to an already established HH flow. */
        flow_pos = hash & HHF_BIT_MASK;
@@ -582,7 +582,7 @@ static int hhf_init(struct Qdisc *sch, struct nlattr *opt,
 
        sch->limit = 1000;
        q->quantum = psched_mtu(qdisc_dev(sch));
-       q->perturbation = prandom_u32();
+       get_random_bytes(&q->perturbation, sizeof(q->perturbation));
        INIT_LIST_HEAD(&q->new_buckets);
        INIT_LIST_HEAD(&q->old_buckets);
 
index 0e44039e729c72be52d0bf65568b3641e7f910d8..42e557d48e4e3f85ef9f6edc3c3bbe3c4df6eacd 100644 (file)
@@ -509,6 +509,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                if (skb->ip_summed == CHECKSUM_PARTIAL &&
                    skb_checksum_help(skb)) {
                        qdisc_drop(skb, sch, to_free);
+                       skb = NULL;
                        goto finish_segs;
                }
 
@@ -593,9 +594,10 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 finish_segs:
        if (segs) {
                unsigned int len, last_len;
-               int nb = 0;
+               int nb;
 
-               len = skb->len;
+               len = skb ? skb->len : 0;
+               nb = skb ? 1 : 0;
 
                while (segs) {
                        skb2 = segs->next;
@@ -612,7 +614,10 @@ finish_segs:
                        }
                        segs = skb2;
                }
-               qdisc_tree_reduce_backlog(sch, -nb, prev_len - len);
+               /* Parent qdiscs accounted for 1 skb of size @prev_len */
+               qdisc_tree_reduce_backlog(sch, -(nb - 1), -(len - prev_len));
+       } else if (!skb) {
+               return NET_XMIT_DROP;
        }
        return NET_XMIT_SUCCESS;
 }
index d448fe3068e5b9f0b35a37d921ab5a74350dc155..4074c50ac3d73100dd82bd631c127326ff49f355 100644 (file)
@@ -18,7 +18,7 @@
 #include <linux/errno.h>
 #include <linux/skbuff.h>
 #include <linux/random.h>
-#include <linux/jhash.h>
+#include <linux/siphash.h>
 #include <net/ip.h>
 #include <net/pkt_sched.h>
 #include <net/pkt_cls.h>
@@ -45,7 +45,7 @@ struct sfb_bucket {
  * (Section 4.4 of SFB reference : moving hash functions)
  */
 struct sfb_bins {
-       u32               perturbation; /* jhash perturbation */
+       siphash_key_t     perturbation; /* siphash key */
        struct sfb_bucket bins[SFB_LEVELS][SFB_NUMBUCKETS];
 };
 
@@ -217,7 +217,8 @@ static u32 sfb_compute_qlen(u32 *prob_r, u32 *avgpm_r, const struct sfb_sched_da
 
 static void sfb_init_perturbation(u32 slot, struct sfb_sched_data *q)
 {
-       q->bins[slot].perturbation = prandom_u32();
+       get_random_bytes(&q->bins[slot].perturbation,
+                        sizeof(q->bins[slot].perturbation));
 }
 
 static void sfb_swap_slot(struct sfb_sched_data *q)
@@ -314,9 +315,9 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                /* If using external classifiers, get result and record it. */
                if (!sfb_classify(skb, fl, &ret, &salt))
                        goto other_drop;
-               sfbhash = jhash_1word(salt, q->bins[slot].perturbation);
+               sfbhash = siphash_1u32(salt, &q->bins[slot].perturbation);
        } else {
-               sfbhash = skb_get_hash_perturb(skb, q->bins[slot].perturbation);
+               sfbhash = skb_get_hash_perturb(skb, &q->bins[slot].perturbation);
        }
 
 
@@ -352,7 +353,7 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                /* Inelastic flow */
                if (q->double_buffering) {
                        sfbhash = skb_get_hash_perturb(skb,
-                           q->bins[slot].perturbation);
+                           &q->bins[slot].perturbation);
                        if (!sfbhash)
                                sfbhash = 1;
                        sfb_skb_cb(skb)->hashes[slot] = sfbhash;
index 68404a9d2ce426f7027d83f0adde634cb031ef4a..c787d4d46017b4b41b8eb6d41f2b0a44560ff5bf 100644 (file)
@@ -14,7 +14,7 @@
 #include <linux/errno.h>
 #include <linux/init.h>
 #include <linux/skbuff.h>
-#include <linux/jhash.h>
+#include <linux/siphash.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
 #include <net/netlink.h>
@@ -117,7 +117,7 @@ struct sfq_sched_data {
        u8              headdrop;
        u8              maxdepth;       /* limit of packets per flow */
 
-       u32             perturbation;
+       siphash_key_t   perturbation;
        u8              cur_depth;      /* depth of longest slot */
        u8              flags;
        unsigned short  scaled_quantum; /* SFQ_ALLOT_SIZE(quantum) */
@@ -157,7 +157,7 @@ static inline struct sfq_head *sfq_dep_head(struct sfq_sched_data *q, sfq_index
 static unsigned int sfq_hash(const struct sfq_sched_data *q,
                             const struct sk_buff *skb)
 {
-       return skb_get_hash_perturb(skb, q->perturbation) & (q->divisor - 1);
+       return skb_get_hash_perturb(skb, &q->perturbation) & (q->divisor - 1);
 }
 
 static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch,
@@ -607,9 +607,11 @@ static void sfq_perturbation(struct timer_list *t)
        struct sfq_sched_data *q = from_timer(q, t, perturb_timer);
        struct Qdisc *sch = q->sch;
        spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch));
+       siphash_key_t nkey;
 
+       get_random_bytes(&nkey, sizeof(nkey));
        spin_lock(root_lock);
-       q->perturbation = prandom_u32();
+       q->perturbation = nkey;
        if (!q->filter_list && q->tail)
                sfq_rehash(sch);
        spin_unlock(root_lock);
@@ -688,7 +690,7 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
        del_timer(&q->perturb_timer);
        if (q->perturb_period) {
                mod_timer(&q->perturb_timer, jiffies + q->perturb_period);
-               q->perturbation = prandom_u32();
+               get_random_bytes(&q->perturbation, sizeof(q->perturbation));
        }
        sch_tree_unlock(sch);
        kfree(p);
@@ -745,7 +747,7 @@ static int sfq_init(struct Qdisc *sch, struct nlattr *opt,
        q->quantum = psched_mtu(qdisc_dev(sch));
        q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum);
        q->perturb_period = 0;
-       q->perturbation = prandom_u32();
+       get_random_bytes(&q->perturbation, sizeof(q->perturbation));
 
        if (opt) {
                int err = sfq_change(sch, opt);
index 2f7b34205c826fdc100ca559aa3b87575a3fca11..2121187229cd906c67e134d8ae78a65620f3296d 100644 (file)
@@ -1044,12 +1044,11 @@ static void taprio_set_picos_per_byte(struct net_device *dev,
        if (err < 0)
                goto skip;
 
-       if (ecmd.base.speed != SPEED_UNKNOWN)
+       if (ecmd.base.speed && ecmd.base.speed != SPEED_UNKNOWN)
                speed = ecmd.base.speed;
 
 skip:
-       picos_per_byte = div64_s64(NSEC_PER_SEC * 1000LL * 8,
-                                  speed * 1000 * 1000);
+       picos_per_byte = (USEC_PER_SEC * 8) / speed;
 
        atomic64_set(&q->picos_per_byte, picos_per_byte);
        netdev_dbg(dev, "taprio: set %s's picos_per_byte to: %lld, linkspeed: %d\n",
@@ -1153,7 +1152,7 @@ EXPORT_SYMBOL_GPL(taprio_offload_free);
  * offload state (PENDING, ACTIVE, INACTIVE) so it can be visible in dump().
  * This is left as TODO.
  */
-void taprio_offload_config_changed(struct taprio_sched *q)
+static void taprio_offload_config_changed(struct taprio_sched *q)
 {
        struct sched_gate_list *oper, *admin;
 
@@ -1342,6 +1341,10 @@ static int taprio_parse_clockid(struct Qdisc *sch, struct nlattr **tb,
                NL_SET_ERR_MSG(extack, "Specifying a 'clockid' is mandatory");
                goto out;
        }
+
+       /* Everything went ok, return success. */
+       err = 0;
+
 out:
        return err;
 }
index fc9a4c6629ce04b38b3882666ef0abbd5ea8705d..0851166b917597b08becf9bf9d5873287b375828 100644 (file)
@@ -175,7 +175,7 @@ static int inet_sctp_diag_fill(struct sock *sk, struct sctp_association *asoc,
                mem[SK_MEMINFO_FWD_ALLOC] = sk->sk_forward_alloc;
                mem[SK_MEMINFO_WMEM_QUEUED] = sk->sk_wmem_queued;
                mem[SK_MEMINFO_OPTMEM] = atomic_read(&sk->sk_omem_alloc);
-               mem[SK_MEMINFO_BACKLOG] = sk->sk_backlog.len;
+               mem[SK_MEMINFO_BACKLOG] = READ_ONCE(sk->sk_backlog.len);
                mem[SK_MEMINFO_DROPS] = atomic_read(&sk->sk_drops);
 
                if (nla_put(skb, INET_DIAG_SKMEMINFO, sizeof(mem), &mem) < 0)
index 1008cdc44dd61a4e940dac694fa3af9aa411f891..2277981559d0700669807dff4981e23e03be66ef 100644 (file)
@@ -201,7 +201,7 @@ int sctp_rcv(struct sk_buff *skb)
 
        if (!xfrm_policy_check(sk, XFRM_POLICY_IN, skb, family))
                goto discard_release;
-       nf_reset(skb);
+       nf_reset_ct(skb);
 
        if (sk_filter(sk, skb))
                goto discard_release;
@@ -243,7 +243,7 @@ int sctp_rcv(struct sk_buff *skb)
                bh_lock_sock(sk);
        }
 
-       if (sock_owned_by_user(sk)) {
+       if (sock_owned_by_user(sk) || !sctp_newsk_ready(sk)) {
                if (sctp_add_backlog(sk, skb)) {
                        bh_unlock_sock(sk);
                        sctp_chunk_free(chunk);
@@ -321,8 +321,8 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
                local_bh_disable();
                bh_lock_sock(sk);
 
-               if (sock_owned_by_user(sk)) {
-                       if (sk_add_backlog(sk, skb, sk->sk_rcvbuf))
+               if (sock_owned_by_user(sk) || !sctp_newsk_ready(sk)) {
+                       if (sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf)))
                                sctp_chunk_free(chunk);
                        else
                                backloged = 1;
@@ -336,7 +336,13 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb)
                if (backloged)
                        return 0;
        } else {
-               sctp_inq_push(inqueue, chunk);
+               if (!sctp_newsk_ready(sk)) {
+                       if (!sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf)))
+                               return 0;
+                       sctp_chunk_free(chunk);
+               } else {
+                       sctp_inq_push(inqueue, chunk);
+               }
        }
 
 done:
@@ -358,7 +364,7 @@ static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb)
        struct sctp_ep_common *rcvr = chunk->rcvr;
        int ret;
 
-       ret = sk_add_backlog(sk, skb, sk->sk_rcvbuf);
+       ret = sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf));
        if (!ret) {
                /* Hold the assoc/ep while hanging on the backlog queue.
                 * This way, we know structures we need will not disappear
index e41ed2e0ae7d0f6a7efe4193990c7156fa859686..48d63956a68c09c9fd1893c74c4594859c60671e 100644 (file)
@@ -2155,7 +2155,7 @@ static enum sctp_ierror sctp_verify_param(struct net *net,
        case SCTP_PARAM_SET_PRIMARY:
                if (ep->asconf_enable)
                        break;
-               goto fallthrough;
+               goto unhandled;
 
        case SCTP_PARAM_HOST_NAME_ADDRESS:
                /* Tell the peer, we won't support this param.  */
@@ -2166,11 +2166,11 @@ static enum sctp_ierror sctp_verify_param(struct net *net,
        case SCTP_PARAM_FWD_TSN_SUPPORT:
                if (ep->prsctp_enable)
                        break;
-               goto fallthrough;
+               goto unhandled;
 
        case SCTP_PARAM_RANDOM:
                if (!ep->auth_enable)
-                       goto fallthrough;
+                       goto unhandled;
 
                /* SCTP-AUTH: Secion 6.1
                 * If the random number is not 32 byte long the association
@@ -2187,7 +2187,7 @@ static enum sctp_ierror sctp_verify_param(struct net *net,
 
        case SCTP_PARAM_CHUNKS:
                if (!ep->auth_enable)
-                       goto fallthrough;
+                       goto unhandled;
 
                /* SCTP-AUTH: Section 3.2
                 * The CHUNKS parameter MUST be included once in the INIT or
@@ -2203,7 +2203,7 @@ static enum sctp_ierror sctp_verify_param(struct net *net,
 
        case SCTP_PARAM_HMAC_ALGO:
                if (!ep->auth_enable)
-                       goto fallthrough;
+                       goto unhandled;
 
                hmacs = (struct sctp_hmac_algo_param *)param.p;
                n_elt = (ntohs(param.p->length) -
@@ -2226,7 +2226,7 @@ static enum sctp_ierror sctp_verify_param(struct net *net,
                        retval = SCTP_IERROR_ABORT;
                }
                break;
-fallthrough:
+unhandled:
        default:
                pr_debug("%s: unrecognized param:%d for chunk:%d\n",
                         __func__, ntohs(param.p->type), cid);
index 939b8d2595bcafd45de7eb4116179dbf56547f2d..ffd3262b7a41eac2e3d825c3f0665066f376ea3c 100644 (file)
@@ -8476,7 +8476,7 @@ __poll_t sctp_poll(struct file *file, struct socket *sock, poll_table *wait)
        mask = 0;
 
        /* Is there any exceptional events?  */
-       if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
+       if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue))
                mask |= EPOLLERR |
                        (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);
        if (sk->sk_shutdown & RCV_SHUTDOWN)
@@ -8485,7 +8485,7 @@ __poll_t sctp_poll(struct file *file, struct socket *sock, poll_table *wait)
                mask |= EPOLLHUP;
 
        /* Is it readable?  Reconsider this code with TCP-style support.  */
-       if (!skb_queue_empty(&sk->sk_receive_queue))
+       if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
                mask |= EPOLLIN | EPOLLRDNORM;
 
        /* The association is either gone or not ready.  */
@@ -8871,7 +8871,7 @@ struct sk_buff *sctp_skb_recv_datagram(struct sock *sk, int flags,
                if (sk_can_busy_loop(sk)) {
                        sk_busy_loop(sk, noblock);
 
-                       if (!skb_queue_empty(&sk->sk_receive_queue))
+                       if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
                                continue;
                }
 
@@ -9306,7 +9306,7 @@ void sctp_copy_sock(struct sock *newsk, struct sock *sk,
        newinet->inet_rcv_saddr = inet->inet_rcv_saddr;
        newinet->inet_dport = htons(asoc->peer.port);
        newinet->pmtudisc = inet->pmtudisc;
-       newinet->inet_id = asoc->next_tsn ^ jiffies;
+       newinet->inet_id = prandom_u32();
 
        newinet->uc_ttl = inet->uc_ttl;
        newinet->mc_loop = 1;
@@ -9500,7 +9500,7 @@ struct proto sctp_prot = {
        .backlog_rcv =  sctp_backlog_rcv,
        .hash        =  sctp_hash,
        .unhash      =  sctp_unhash,
-       .get_port    =  sctp_get_port,
+       .no_autobind =  true,
        .obj_size    =  sizeof(struct sctp_sock),
        .useroffset  =  offsetof(struct sctp_sock, subscribe),
        .usersize    =  offsetof(struct sctp_sock, initmsg) -
@@ -9542,7 +9542,7 @@ struct proto sctpv6_prot = {
        .backlog_rcv    = sctp_backlog_rcv,
        .hash           = sctp_hash,
        .unhash         = sctp_unhash,
-       .get_port       = sctp_get_port,
+       .no_autobind    = true,
        .obj_size       = sizeof(struct sctp6_sock),
        .useroffset     = offsetof(struct sctp6_sock, sctp.subscribe),
        .usersize       = offsetof(struct sctp6_sock, sctp.initmsg) -
index 5b932583e4076feacb786d315bc4d6181fd9a17b..47946f489fd4f4a7bfb771a4062d962b818670ca 100644 (file)
@@ -123,6 +123,12 @@ struct proto smc_proto6 = {
 };
 EXPORT_SYMBOL_GPL(smc_proto6);
 
+static void smc_restore_fallback_changes(struct smc_sock *smc)
+{
+       smc->clcsock->file->private_data = smc->sk.sk_socket;
+       smc->clcsock->file = NULL;
+}
+
 static int __smc_release(struct smc_sock *smc)
 {
        struct sock *sk = &smc->sk;
@@ -141,6 +147,7 @@ static int __smc_release(struct smc_sock *smc)
                }
                sk->sk_state = SMC_CLOSED;
                sk->sk_state_change(sk);
+               smc_restore_fallback_changes(smc);
        }
 
        sk->sk_prot->unhash(sk);
@@ -700,8 +707,6 @@ static int __smc_connect(struct smc_sock *smc)
        int smc_type;
        int rc = 0;
 
-       sock_hold(&smc->sk); /* sock put in passive closing */
-
        if (smc->use_fallback)
                return smc_connect_fallback(smc, smc->fallback_rsn);
 
@@ -846,6 +851,8 @@ static int smc_connect(struct socket *sock, struct sockaddr *addr,
        rc = kernel_connect(smc->clcsock, addr, alen, flags);
        if (rc && rc != -EINPROGRESS)
                goto out;
+
+       sock_hold(&smc->sk); /* sock put in passive closing */
        if (flags & O_NONBLOCK) {
                if (schedule_work(&smc->connect_work))
                        smc->connect_nonblock = 1;
@@ -1291,8 +1298,8 @@ static void smc_listen_work(struct work_struct *work)
        /* check if RDMA is available */
        if (!ism_supported) { /* SMC_TYPE_R or SMC_TYPE_B */
                /* prepare RDMA check */
-               memset(&ini, 0, sizeof(ini));
                ini.is_smcd = false;
+               ini.ism_dev = NULL;
                ini.ib_lcl = &pclc->lcl;
                rc = smc_find_rdma_device(new_smc, &ini);
                if (rc) {
index 4ca50ddf8d1619a78e8a119ef2701d69957a8cd9..2ba97ff325a5d1eb90761ad47f1a8fa5d2f743d3 100644 (file)
@@ -213,7 +213,7 @@ static int smc_lgr_create(struct smc_sock *smc, struct smc_init_info *ini)
        lgr = kzalloc(sizeof(*lgr), GFP_KERNEL);
        if (!lgr) {
                rc = SMC_CLC_DECL_MEM;
-               goto out;
+               goto ism_put_vlan;
        }
        lgr->is_smcd = ini->is_smcd;
        lgr->sync_err = 0;
@@ -289,6 +289,9 @@ clear_llc_lnk:
        smc_llc_link_clear(lnk);
 free_lgr:
        kfree(lgr);
+ism_put_vlan:
+       if (ini->is_smcd && ini->vlan_id)
+               smc_ism_put_vlan(ini->ism_dev, ini->vlan_id);
 out:
        if (rc < 0) {
                if (rc == -ENOMEM)
@@ -558,7 +561,7 @@ int smc_vlan_by_tcpsk(struct socket *clcsock, struct smc_init_info *ini)
        }
 
        rtnl_lock();
-       nest_lvl = dev_get_nest_level(ndev);
+       nest_lvl = ndev->lower_level;
        for (i = 0; i < nest_lvl; i++) {
                struct list_head *lower = &ndev->adj_list.lower;
 
index bab2da8cf17af875db0348a80b1d0488272f4990..2920b006f65c00f0f99a92e5625b895ad292fecd 100644 (file)
@@ -718,7 +718,7 @@ static struct net_device *pnet_find_base_ndev(struct net_device *ndev)
        int i, nest_lvl;
 
        rtnl_lock();
-       nest_lvl = dev_get_nest_level(ndev);
+       nest_lvl = ndev->lower_level;
        for (i = 0; i < nest_lvl; i++) {
                struct list_head *lower = &ndev->adj_list.lower;
 
index 413a6abf227ef13dacc8c6c20ccb92aa953a1bf0..97e8369002d71e970d4c01589cf9f671fddde6ea 100644 (file)
@@ -211,8 +211,7 @@ int smc_rx_wait(struct smc_sock *smc, long *timeo,
        rc = sk_wait_event(sk, timeo,
                           sk->sk_err ||
                           sk->sk_shutdown & RCV_SHUTDOWN ||
-                          fcrit(conn) ||
-                          smc_cdc_rxed_any_close_or_senddone(conn),
+                          fcrit(conn),
                           &wait);
        remove_wait_queue(sk_sleep(sk), &wait);
        sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
@@ -262,6 +261,18 @@ static int smc_rx_recv_urg(struct smc_sock *smc, struct msghdr *msg, int len,
        return -EAGAIN;
 }
 
+static bool smc_rx_recvmsg_data_available(struct smc_sock *smc)
+{
+       struct smc_connection *conn = &smc->conn;
+
+       if (smc_rx_data_available(conn))
+               return true;
+       else if (conn->urg_state == SMC_URG_VALID)
+               /* we received a single urgent Byte - skip */
+               smc_rx_update_cons(smc, 0);
+       return false;
+}
+
 /* smc_rx_recvmsg - receive data from RMBE
  * @msg:       copy data to receive buffer
  * @pipe:      copy data to pipe if set - indicates splice() call
@@ -303,16 +314,18 @@ int smc_rx_recvmsg(struct smc_sock *smc, struct msghdr *msg,
                if (read_done >= target || (pipe && read_done))
                        break;
 
-               if (atomic_read(&conn->bytes_to_rcv))
+               if (smc_rx_recvmsg_data_available(smc))
                        goto copy;
-               else if (conn->urg_state == SMC_URG_VALID)
-                       /* we received a single urgent Byte - skip */
-                       smc_rx_update_cons(smc, 0);
 
                if (sk->sk_shutdown & RCV_SHUTDOWN ||
-                   smc_cdc_rxed_any_close_or_senddone(conn) ||
-                   conn->local_tx_ctrl.conn_state_flags.peer_conn_abort)
+                   conn->local_tx_ctrl.conn_state_flags.peer_conn_abort) {
+                       /* smc_cdc_msg_recv_action() could have run after
+                        * above smc_rx_recvmsg_data_available()
+                        */
+                       if (smc_rx_recvmsg_data_available(smc))
+                               goto copy;
                        break;
+               }
 
                if (read_done) {
                        if (sk->sk_err ||
index 339e8c077c2db9dadf79e6498b2361c483753be3..195b40c5dae4ba8cb364c46d4631680b257bb7fa 100644 (file)
@@ -220,7 +220,7 @@ void xprt_destroy_bc(struct rpc_xprt *xprt, unsigned int max_reqs)
                goto out;
 
        spin_lock_bh(&xprt->bc_pa_lock);
-       xprt->bc_alloc_max -= max_reqs;
+       xprt->bc_alloc_max -= min(max_reqs, xprt->bc_alloc_max);
        list_for_each_entry_safe(req, tmp, &xprt->bc_pa_list, rq_bc_pa_list) {
                dprintk("RPC:        req=%p\n", req);
                list_del(&req->rq_bc_pa_list);
@@ -307,8 +307,8 @@ void xprt_free_bc_rqst(struct rpc_rqst *req)
                 */
                dprintk("RPC:       Last session removed req=%p\n", req);
                xprt_free_allocation(req);
-               return;
        }
+       xprt_put(xprt);
 }
 
 /*
@@ -339,7 +339,7 @@ found:
                spin_unlock(&xprt->bc_pa_lock);
                if (new) {
                        if (req != new)
-                               xprt_free_bc_rqst(new);
+                               xprt_free_allocation(new);
                        break;
                } else if (req)
                        break;
@@ -368,6 +368,7 @@ void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied)
        set_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);
 
        dprintk("RPC:       add callback request to list\n");
+       xprt_get(xprt);
        spin_lock(&bc_serv->sv_cb_lock);
        list_add(&req->rq_bc_list, &bc_serv->sv_cb_list);
        wake_up(&bc_serv->sv_cb_waitq);
index 8a45b3ccc31343aa089e9bed8837247256ecd61d..41df4c507193b39410f8c99f65c9fa0f795380c1 100644 (file)
@@ -1942,6 +1942,11 @@ static void xprt_destroy_cb(struct work_struct *work)
        rpc_destroy_wait_queue(&xprt->sending);
        rpc_destroy_wait_queue(&xprt->backlog);
        kfree(xprt->servername);
+       /*
+        * Destroy any existing back channel
+        */
+       xprt_destroy_backchannel(xprt, UINT_MAX);
+
        /*
         * Tear down transport state and free the rpc_xprt
         */
index 50e075fcdd8feb13bc9747c613373e9b2570c073..b458bf53ca699adc271a790b7cb082af8aa31633 100644 (file)
@@ -163,6 +163,7 @@ void xprt_rdma_bc_free_rqst(struct rpc_rqst *rqst)
        spin_lock(&xprt->bc_pa_lock);
        list_add_tail(&rqst->rq_bc_pa_list, &xprt->bc_pa_list);
        spin_unlock(&xprt->bc_pa_lock);
+       xprt_put(xprt);
 }
 
 static struct rpc_rqst *rpcrdma_bc_rqst_get(struct rpcrdma_xprt *r_xprt)
@@ -259,6 +260,7 @@ void rpcrdma_bc_receive_call(struct rpcrdma_xprt *r_xprt,
 
        /* Queue rqst for ULP's callback service */
        bc_serv = xprt->bc_serv;
+       xprt_get(xprt);
        spin_lock(&bc_serv->sv_cb_lock);
        list_add(&rqst->rq_bc_list, &bc_serv->sv_cb_list);
        spin_unlock(&bc_serv->sv_cb_lock);
index 9ac88722fa83ab953ba006a9c7777e7077fca532..70e52f567b2a2c84e7a33787df3296c3ae23e0cc 100644 (file)
@@ -1249,19 +1249,21 @@ static void xs_error_report(struct sock *sk)
 {
        struct sock_xprt *transport;
        struct rpc_xprt *xprt;
-       int err;
 
        read_lock_bh(&sk->sk_callback_lock);
        if (!(xprt = xprt_from_sock(sk)))
                goto out;
 
        transport = container_of(xprt, struct sock_xprt, xprt);
-       err = -sk->sk_err;
-       if (err == 0)
+       transport->xprt_err = -sk->sk_err;
+       if (transport->xprt_err == 0)
                goto out;
        dprintk("RPC:       xs_error_report client %p, error=%d...\n",
-                       xprt, -err);
-       trace_rpc_socket_error(xprt, sk->sk_socket, err);
+                       xprt, -transport->xprt_err);
+       trace_rpc_socket_error(xprt, sk->sk_socket, transport->xprt_err);
+
+       /* barrier ensures xprt_err is set before XPRT_SOCK_WAKE_ERROR */
+       smp_mb__before_atomic();
        xs_run_error_worker(transport, XPRT_SOCK_WAKE_ERROR);
  out:
        read_unlock_bh(&sk->sk_callback_lock);
@@ -2476,7 +2478,6 @@ static void xs_wake_write(struct sock_xprt *transport)
 static void xs_wake_error(struct sock_xprt *transport)
 {
        int sockerr;
-       int sockerr_len = sizeof(sockerr);
 
        if (!test_bit(XPRT_SOCK_WAKE_ERROR, &transport->sock_state))
                return;
@@ -2485,9 +2486,7 @@ static void xs_wake_error(struct sock_xprt *transport)
                goto out;
        if (!test_and_clear_bit(XPRT_SOCK_WAKE_ERROR, &transport->sock_state))
                goto out;
-       if (kernel_getsockopt(transport->sock, SOL_SOCKET, SO_ERROR,
-                               (char *)&sockerr, &sockerr_len) != 0)
-               goto out;
+       sockerr = xchg(&transport->xprt_err, 0);
        if (sockerr < 0)
                xprt_wake_pending_tasks(&transport->xprt, sockerr);
 out:
index 6cc75ffd9e2c2741c7734c173609bc0f30ce679e..999eab592de8fe1bfd796c66f7a4349aa1ba20e6 100644 (file)
@@ -160,6 +160,7 @@ struct tipc_link {
        struct {
                u16 len;
                u16 limit;
+               struct sk_buff *target_bskb;
        } backlog[5];
        u16 snd_nxt;
        u16 window;
@@ -880,6 +881,7 @@ static void link_prepare_wakeup(struct tipc_link *l)
 void tipc_link_reset(struct tipc_link *l)
 {
        struct sk_buff_head list;
+       u32 imp;
 
        __skb_queue_head_init(&list);
 
@@ -901,11 +903,10 @@ void tipc_link_reset(struct tipc_link *l)
        __skb_queue_purge(&l->deferdq);
        __skb_queue_purge(&l->backlogq);
        __skb_queue_purge(&l->failover_deferdq);
-       l->backlog[TIPC_LOW_IMPORTANCE].len = 0;
-       l->backlog[TIPC_MEDIUM_IMPORTANCE].len = 0;
-       l->backlog[TIPC_HIGH_IMPORTANCE].len = 0;
-       l->backlog[TIPC_CRITICAL_IMPORTANCE].len = 0;
-       l->backlog[TIPC_SYSTEM_IMPORTANCE].len = 0;
+       for (imp = 0; imp <= TIPC_SYSTEM_IMPORTANCE; imp++) {
+               l->backlog[imp].len = 0;
+               l->backlog[imp].target_bskb = NULL;
+       }
        kfree_skb(l->reasm_buf);
        kfree_skb(l->reasm_tnlmsg);
        kfree_skb(l->failover_reasm_skb);
@@ -947,7 +948,7 @@ int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
        u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
        struct sk_buff_head *transmq = &l->transmq;
        struct sk_buff_head *backlogq = &l->backlogq;
-       struct sk_buff *skb, *_skb, *bskb;
+       struct sk_buff *skb, *_skb, **tskb;
        int pkt_cnt = skb_queue_len(list);
        int rc = 0;
 
@@ -999,19 +1000,21 @@ int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
                        seqno++;
                        continue;
                }
-               if (tipc_msg_bundle(skb_peek_tail(backlogq), hdr, mtu)) {
+               tskb = &l->backlog[imp].target_bskb;
+               if (tipc_msg_bundle(*tskb, hdr, mtu)) {
                        kfree_skb(__skb_dequeue(list));
                        l->stats.sent_bundled++;
                        continue;
                }
-               if (tipc_msg_make_bundle(&bskb, hdr, mtu, l->addr)) {
+               if (tipc_msg_make_bundle(tskb, hdr, mtu, l->addr)) {
                        kfree_skb(__skb_dequeue(list));
-                       __skb_queue_tail(backlogq, bskb);
-                       l->backlog[msg_importance(buf_msg(bskb))].len++;
+                       __skb_queue_tail(backlogq, *tskb);
+                       l->backlog[imp].len++;
                        l->stats.sent_bundled++;
                        l->stats.sent_bundles++;
                        continue;
                }
+               l->backlog[imp].target_bskb = NULL;
                l->backlog[imp].len += skb_queue_len(list);
                skb_queue_splice_tail_init(list, backlogq);
        }
@@ -1027,6 +1030,7 @@ static void tipc_link_advance_backlog(struct tipc_link *l,
        u16 seqno = l->snd_nxt;
        u16 ack = l->rcv_nxt - 1;
        u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
+       u32 imp;
 
        while (skb_queue_len(&l->transmq) < l->window) {
                skb = skb_peek(&l->backlogq);
@@ -1037,7 +1041,10 @@ static void tipc_link_advance_backlog(struct tipc_link *l,
                        break;
                __skb_dequeue(&l->backlogq);
                hdr = buf_msg(skb);
-               l->backlog[msg_importance(hdr)].len--;
+               imp = msg_importance(hdr);
+               l->backlog[imp].len--;
+               if (unlikely(skb == l->backlog[imp].target_bskb))
+                       l->backlog[imp].target_bskb = NULL;
                __skb_queue_tail(&l->transmq, skb);
                /* next retransmit attempt */
                if (link_is_bc_sndlink(l))
index e6d49cdc61b40becff116590b668abdfcb99dc0b..922d262e153ff40a03de9125c0fe52300857fc8f 100644 (file)
@@ -543,10 +543,7 @@ bool tipc_msg_make_bundle(struct sk_buff **skb,  struct tipc_msg *msg,
        bmsg = buf_msg(_skb);
        tipc_msg_init(msg_prevnode(msg), bmsg, MSG_BUNDLER, 0,
                      INT_H_SIZE, dnode);
-       if (msg_isdata(msg))
-               msg_set_importance(bmsg, TIPC_CRITICAL_IMPORTANCE);
-       else
-               msg_set_importance(bmsg, TIPC_SYSTEM_IMPORTANCE);
+       msg_set_importance(bmsg, msg_importance(msg));
        msg_set_seqno(bmsg, msg_seqno(msg));
        msg_set_ack(bmsg, msg_ack(msg));
        msg_set_bcast_ack(bmsg, msg_bcast_ack(msg));
index 3b9f8cc328f5c3aa2f39b5a8267bd206e714835f..4b92b196cfa668c9ee59b9dae4e83bb7b5b5ba3d 100644 (file)
@@ -740,7 +740,7 @@ static __poll_t tipc_poll(struct file *file, struct socket *sock,
                /* fall through */
        case TIPC_LISTEN:
        case TIPC_CONNECTING:
-               if (!skb_queue_empty(&sk->sk_receive_queue))
+               if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
                        revents |= EPOLLIN | EPOLLRDNORM;
                break;
        case TIPC_OPEN:
@@ -748,7 +748,7 @@ static __poll_t tipc_poll(struct file *file, struct socket *sock,
                        revents |= EPOLLOUT;
                if (!tipc_sk_type_connectionless(sk))
                        break;
-               if (skb_queue_empty(&sk->sk_receive_queue))
+               if (skb_queue_empty_lockless(&sk->sk_receive_queue))
                        break;
                revents |= EPOLLIN | EPOLLRDNORM;
                break;
@@ -2119,13 +2119,13 @@ static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *skb)
        struct tipc_msg *hdr = buf_msg(skb);
 
        if (unlikely(msg_in_group(hdr)))
-               return sk->sk_rcvbuf;
+               return READ_ONCE(sk->sk_rcvbuf);
 
        if (unlikely(!msg_connected(hdr)))
-               return sk->sk_rcvbuf << msg_importance(hdr);
+               return READ_ONCE(sk->sk_rcvbuf) << msg_importance(hdr);
 
        if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL))
-               return sk->sk_rcvbuf;
+               return READ_ONCE(sk->sk_rcvbuf);
 
        return FLOWCTL_MSG_LIM;
 }
@@ -3790,7 +3790,7 @@ int tipc_sk_dump(struct sock *sk, u16 dqueues, char *buf)
        i += scnprintf(buf + i, sz - i, " %d", sk->sk_sndbuf);
        i += scnprintf(buf + i, sz - i, " | %d", sk_rmem_alloc_get(sk));
        i += scnprintf(buf + i, sz - i, " %d", sk->sk_rcvbuf);
-       i += scnprintf(buf + i, sz - i, " | %d\n", sk->sk_backlog.len);
+       i += scnprintf(buf + i, sz - i, " | %d\n", READ_ONCE(sk->sk_backlog.len));
 
        if (dqueues & TIPC_DUMP_SK_SNDQ) {
                i += scnprintf(buf + i, sz - i, "sk_write_queue: ");
index 67e87db5877f1132b52b0c93d349983873fec38f..0d8da809bea2e6053c6bd5687ae19ab3f3ca617a 100644 (file)
@@ -2599,7 +2599,7 @@ static __poll_t unix_poll(struct file *file, struct socket *sock, poll_table *wa
                mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
 
        /* readable? */
-       if (!skb_queue_empty(&sk->sk_receive_queue))
+       if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
                mask |= EPOLLIN | EPOLLRDNORM;
 
        /* Connection-based need to check for termination and startup */
@@ -2628,7 +2628,7 @@ static __poll_t unix_dgram_poll(struct file *file, struct socket *sock,
        mask = 0;
 
        /* exceptional events? */
-       if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
+       if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue))
                mask |= EPOLLERR |
                        (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);
 
@@ -2638,7 +2638,7 @@ static __poll_t unix_dgram_poll(struct file *file, struct socket *sock,
                mask |= EPOLLHUP;
 
        /* readable? */
-       if (!skb_queue_empty(&sk->sk_receive_queue))
+       if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
                mask |= EPOLLIN | EPOLLRDNORM;
 
        /* Connection-based need to check for termination and startup */
index ab47bf3ab66e8200c9fcfb6462dceba6a4b72787..582a3e4dfce295fa67100468335bc8dbf8dc1095 100644 (file)
@@ -638,7 +638,7 @@ struct sock *__vsock_create(struct net *net,
 }
 EXPORT_SYMBOL_GPL(__vsock_create);
 
-static void __vsock_release(struct sock *sk)
+static void __vsock_release(struct sock *sk, int level)
 {
        if (sk) {
                struct sk_buff *skb;
@@ -648,9 +648,17 @@ static void __vsock_release(struct sock *sk)
                vsk = vsock_sk(sk);
                pending = NULL; /* Compiler warning. */
 
+               /* The release call is supposed to use lock_sock_nested()
+                * rather than lock_sock(), if a sock lock should be acquired.
+                */
                transport->release(vsk);
 
-               lock_sock(sk);
+               /* When "level" is SINGLE_DEPTH_NESTING, use the nested
+                * version to avoid the warning "possible recursive locking
+                * detected". When "level" is 0, lock_sock_nested(sk, level)
+                * is the same as lock_sock(sk).
+                */
+               lock_sock_nested(sk, level);
                sock_orphan(sk);
                sk->sk_shutdown = SHUTDOWN_MASK;
 
@@ -659,7 +667,7 @@ static void __vsock_release(struct sock *sk)
 
                /* Clean up any sockets that never were accepted. */
                while ((pending = vsock_dequeue_accept(sk)) != NULL) {
-                       __vsock_release(pending);
+                       __vsock_release(pending, SINGLE_DEPTH_NESTING);
                        sock_put(pending);
                }
 
@@ -708,7 +716,7 @@ EXPORT_SYMBOL_GPL(vsock_stream_has_space);
 
 static int vsock_release(struct socket *sock)
 {
-       __vsock_release(sock->sk);
+       __vsock_release(sock->sk, 0);
        sock->sk = NULL;
        sock->state = SS_FREE;
 
@@ -862,7 +870,7 @@ static __poll_t vsock_poll(struct file *file, struct socket *sock,
                 * the queue and write as long as the socket isn't shutdown for
                 * sending.
                 */
-               if (!skb_queue_empty(&sk->sk_receive_queue) ||
+               if (!skb_queue_empty_lockless(&sk->sk_receive_queue) ||
                    (sk->sk_shutdown & RCV_SHUTDOWN)) {
                        mask |= EPOLLIN | EPOLLRDNORM;
                }
index 261521d286d6e5afa71bbd010395fd2358ea805d..c443db7af8d4af073bb6a7b81df2eed36320f80a 100644 (file)
@@ -559,7 +559,7 @@ static void hvs_release(struct vsock_sock *vsk)
        struct sock *sk = sk_vsock(vsk);
        bool remove_sock;
 
-       lock_sock(sk);
+       lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
        remove_sock = hvs_close_lock_held(vsk);
        release_sock(sk);
        if (remove_sock)
index 5bb70c692b1ee3ff40758304e6e74b2feb18f6ee..481f7f8a16551ce8de4f6e3100464a32b0033997 100644 (file)
@@ -204,10 +204,14 @@ static int virtio_transport_send_pkt_info(struct vsock_sock *vsk,
        return virtio_transport_get_ops()->send_pkt(pkt);
 }
 
-static void virtio_transport_inc_rx_pkt(struct virtio_vsock_sock *vvs,
+static bool virtio_transport_inc_rx_pkt(struct virtio_vsock_sock *vvs,
                                        struct virtio_vsock_pkt *pkt)
 {
+       if (vvs->rx_bytes + pkt->len > vvs->buf_alloc)
+               return false;
+
        vvs->rx_bytes += pkt->len;
+       return true;
 }
 
 static void virtio_transport_dec_rx_pkt(struct virtio_vsock_sock *vvs,
@@ -458,6 +462,9 @@ void virtio_transport_set_buffer_size(struct vsock_sock *vsk, u64 val)
                vvs->buf_size_max = val;
        vvs->buf_size = val;
        vvs->buf_alloc = val;
+
+       virtio_transport_send_credit_update(vsk, VIRTIO_VSOCK_TYPE_STREAM,
+                                           NULL);
 }
 EXPORT_SYMBOL_GPL(virtio_transport_set_buffer_size);
 
@@ -820,7 +827,7 @@ void virtio_transport_release(struct vsock_sock *vsk)
        struct sock *sk = &vsk->sk;
        bool remove_sock = true;
 
-       lock_sock(sk);
+       lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
        if (sk->sk_type == SOCK_STREAM)
                remove_sock = virtio_transport_close(vsk);
 
@@ -876,14 +883,18 @@ virtio_transport_recv_enqueue(struct vsock_sock *vsk,
                              struct virtio_vsock_pkt *pkt)
 {
        struct virtio_vsock_sock *vvs = vsk->trans;
-       bool free_pkt = false;
+       bool can_enqueue, free_pkt = false;
 
        pkt->len = le32_to_cpu(pkt->hdr.len);
        pkt->off = 0;
 
        spin_lock_bh(&vvs->rx_lock);
 
-       virtio_transport_inc_rx_pkt(vvs, pkt);
+       can_enqueue = virtio_transport_inc_rx_pkt(vvs, pkt);
+       if (!can_enqueue) {
+               free_pkt = true;
+               goto out;
+       }
 
        /* Try to copy small packets into the buffer of last packet queued,
         * to avoid wasting memory queueing the entire buffer with a small
index e851cafd8e2f631fb4461676155ed9210eea8ec1..fcac5c6366e16d5d40e69a4cdc9378c7ba926528 100644 (file)
@@ -204,6 +204,11 @@ bool cfg80211_chandef_valid(const struct cfg80211_chan_def *chandef)
                return false;
        }
 
+       /* channel 14 is only for IEEE 802.11b */
+       if (chandef->center_freq1 == 2484 &&
+           chandef->width != NL80211_CHAN_WIDTH_20_NOHT)
+               return false;
+
        if (cfg80211_chandef_is_edmg(chandef) &&
            !cfg80211_edmg_chandef_valid(chandef))
                return false;
index d21b1581a665c9ec0bdcc900d911193fd62224fb..7b72286922f7263acb9c0179d4110725a4311aba 100644 (file)
@@ -201,6 +201,38 @@ cfg80211_get_dev_from_info(struct net *netns, struct genl_info *info)
        return __cfg80211_rdev_from_attrs(netns, info->attrs);
 }
 
+static int validate_beacon_head(const struct nlattr *attr,
+                               struct netlink_ext_ack *extack)
+{
+       const u8 *data = nla_data(attr);
+       unsigned int len = nla_len(attr);
+       const struct element *elem;
+       const struct ieee80211_mgmt *mgmt = (void *)data;
+       unsigned int fixedlen = offsetof(struct ieee80211_mgmt,
+                                        u.beacon.variable);
+
+       if (len < fixedlen)
+               goto err;
+
+       if (ieee80211_hdrlen(mgmt->frame_control) !=
+           offsetof(struct ieee80211_mgmt, u.beacon))
+               goto err;
+
+       data += fixedlen;
+       len -= fixedlen;
+
+       for_each_element(elem, data, len) {
+               /* nothing */
+       }
+
+       if (for_each_element_completed(elem, data, len))
+               return 0;
+
+err:
+       NL_SET_ERR_MSG_ATTR(extack, attr, "malformed beacon head");
+       return -EINVAL;
+}
+
 static int validate_ie_attr(const struct nlattr *attr,
                            struct netlink_ext_ack *extack)
 {
@@ -338,8 +370,9 @@ const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
 
        [NL80211_ATTR_BEACON_INTERVAL] = { .type = NLA_U32 },
        [NL80211_ATTR_DTIM_PERIOD] = { .type = NLA_U32 },
-       [NL80211_ATTR_BEACON_HEAD] = { .type = NLA_BINARY,
-                                      .len = IEEE80211_MAX_DATA_LEN },
+       [NL80211_ATTR_BEACON_HEAD] =
+               NLA_POLICY_VALIDATE_FN(NLA_BINARY, validate_beacon_head,
+                                      IEEE80211_MAX_DATA_LEN),
        [NL80211_ATTR_BEACON_TAIL] =
                NLA_POLICY_VALIDATE_FN(NLA_BINARY, validate_ie_attr,
                                       IEEE80211_MAX_DATA_LEN),
@@ -360,7 +393,7 @@ const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
        [NL80211_ATTR_MNTR_FLAGS] = { /* NLA_NESTED can't be empty */ },
        [NL80211_ATTR_MESH_ID] = { .type = NLA_BINARY,
                                   .len = IEEE80211_MAX_MESH_ID_LEN },
-       [NL80211_ATTR_MPATH_NEXT_HOP] = { .type = NLA_U32 },
+       [NL80211_ATTR_MPATH_NEXT_HOP] = NLA_POLICY_ETH_ADDR_COMPAT,
 
        [NL80211_ATTR_REG_ALPHA2] = { .type = NLA_STRING, .len = 2 },
        [NL80211_ATTR_REG_RULES] = { .type = NLA_NESTED },
@@ -2636,6 +2669,8 @@ int nl80211_parse_chandef(struct cfg80211_registered_device *rdev,
 
        control_freq = nla_get_u32(attrs[NL80211_ATTR_WIPHY_FREQ]);
 
+       memset(chandef, 0, sizeof(*chandef));
+
        chandef->chan = ieee80211_get_channel(&rdev->wiphy, control_freq);
        chandef->width = NL80211_CHAN_WIDTH_20_NOHT;
        chandef->center_freq1 = control_freq;
@@ -3176,7 +3211,7 @@ static int nl80211_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flag
 
        if (rdev->ops->get_channel) {
                int ret;
-               struct cfg80211_chan_def chandef;
+               struct cfg80211_chan_def chandef = {};
 
                ret = rdev_get_channel(rdev, wdev, &chandef);
                if (ret == 0) {
@@ -6270,6 +6305,9 @@ static int nl80211_del_mpath(struct sk_buff *skb, struct genl_info *info)
        if (!rdev->ops->del_mpath)
                return -EOPNOTSUPP;
 
+       if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT)
+               return -EOPNOTSUPP;
+
        return rdev_del_mpath(rdev, dev, dst);
 }
 
@@ -13644,7 +13682,7 @@ static int nl80211_get_ftm_responder_stats(struct sk_buff *skb,
        hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0,
                             NL80211_CMD_GET_FTM_RESPONDER_STATS);
        if (!hdr)
-               return -ENOBUFS;
+               goto nla_put_failure;
 
        if (nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex))
                goto nla_put_failure;
index 5311d0ae245498fca3e9a41f6fe0d6eb42160a40..446c76d44e65a04878ceeaabda4f428e59e5b0f9 100644 (file)
@@ -2108,7 +2108,7 @@ static void reg_call_notifier(struct wiphy *wiphy,
 
 static bool reg_wdev_chan_valid(struct wiphy *wiphy, struct wireless_dev *wdev)
 {
-       struct cfg80211_chan_def chandef;
+       struct cfg80211_chan_def chandef = {};
        struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
        enum nl80211_iftype iftype;
 
@@ -3883,6 +3883,7 @@ bool regulatory_pre_cac_allowed(struct wiphy *wiphy)
 
        return pre_cac_allowed;
 }
+EXPORT_SYMBOL(regulatory_pre_cac_allowed);
 
 void regulatory_propagate_dfs_state(struct wiphy *wiphy,
                                    struct cfg80211_chan_def *chandef,
index 504133d76de4de784dc7b2dccdac5a7ec11afbc0..dc8f689bd46902af8be9988283ebfaf18a1131d9 100644 (file)
@@ -155,14 +155,6 @@ bool regulatory_indoor_allowed(void);
  */
 #define REG_PRE_CAC_EXPIRY_GRACE_MS 2000
 
-/**
- * regulatory_pre_cac_allowed - if pre-CAC allowed in the current dfs domain
- * @wiphy: wiphy for which pre-CAC capability is checked.
-
- * Pre-CAC is allowed only in ETSI domain.
- */
-bool regulatory_pre_cac_allowed(struct wiphy *wiphy);
-
 /**
  * regulatory_propagate_dfs_state - Propagate DFS channel state to other wiphys
  * @wiphy - wiphy on which radar is detected and the event will be propagated
index d313c9befa23d18b4d12a512151f390bd4706400..aef240fdf8df62dc9c15b515395db7c9ee4f1a18 100644 (file)
@@ -1703,8 +1703,7 @@ cfg80211_parse_mbssid_frame_data(struct wiphy *wiphy,
 static void
 cfg80211_update_notlisted_nontrans(struct wiphy *wiphy,
                                   struct cfg80211_bss *nontrans_bss,
-                                  struct ieee80211_mgmt *mgmt, size_t len,
-                                  gfp_t gfp)
+                                  struct ieee80211_mgmt *mgmt, size_t len)
 {
        u8 *ie, *new_ie, *pos;
        const u8 *nontrans_ssid, *trans_ssid, *mbssid;
@@ -1715,6 +1714,8 @@ cfg80211_update_notlisted_nontrans(struct wiphy *wiphy,
        const struct cfg80211_bss_ies *old;
        u8 cpy_len;
 
+       lockdep_assert_held(&wiphy_to_rdev(wiphy)->bss_lock);
+
        ie = mgmt->u.probe_resp.variable;
 
        new_ie_len = ielen;
@@ -1723,26 +1724,30 @@ cfg80211_update_notlisted_nontrans(struct wiphy *wiphy,
                return;
        new_ie_len -= trans_ssid[1];
        mbssid = cfg80211_find_ie(WLAN_EID_MULTIPLE_BSSID, ie, ielen);
-       if (!mbssid)
+       /*
+        * It's not valid to have the MBSSID element before SSID;
+        * ignore if that happens - the code below assumes it is
+        * after (while copying things in between).
+        */
+       if (!mbssid || mbssid < trans_ssid)
                return;
        new_ie_len -= mbssid[1];
-       rcu_read_lock();
+
        nontrans_ssid = ieee80211_bss_get_ie(nontrans_bss, WLAN_EID_SSID);
-       if (!nontrans_ssid) {
-               rcu_read_unlock();
+       if (!nontrans_ssid)
                return;
-       }
+
        new_ie_len += nontrans_ssid[1];
-       rcu_read_unlock();
 
        /* generate new ie for nontrans BSS
         * 1. replace SSID with nontrans BSS' SSID
         * 2. skip MBSSID IE
         */
-       new_ie = kzalloc(new_ie_len, gfp);
+       new_ie = kzalloc(new_ie_len, GFP_ATOMIC);
        if (!new_ie)
                return;
-       new_ies = kzalloc(sizeof(*new_ies) + new_ie_len, gfp);
+
+       new_ies = kzalloc(sizeof(*new_ies) + new_ie_len, GFP_ATOMIC);
        if (!new_ies)
                goto out_free;
 
@@ -1896,6 +1901,8 @@ cfg80211_inform_bss_frame_data(struct wiphy *wiphy,
        cfg80211_parse_mbssid_frame_data(wiphy, data, mgmt, len,
                                         &non_tx_data, gfp);
 
+       spin_lock_bh(&wiphy_to_rdev(wiphy)->bss_lock);
+
        /* check if the res has other nontransmitting bss which is not
         * in MBSSID IE
         */
@@ -1910,8 +1917,9 @@ cfg80211_inform_bss_frame_data(struct wiphy *wiphy,
                ies2 = rcu_access_pointer(tmp_bss->ies);
                if (ies2->tsf < ies1->tsf)
                        cfg80211_update_notlisted_nontrans(wiphy, tmp_bss,
-                                                          mgmt, len, gfp);
+                                                          mgmt, len);
        }
+       spin_unlock_bh(&wiphy_to_rdev(wiphy)->bss_lock);
 
        return res;
 }
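The scan.c hunks move locking out of cfg80211_update_notlisted_nontrans(): the caller now holds the rdev's bss_lock around the whole walk, the callee only asserts it via lockdep_assert_held(), and the allocations switch to GFP_ATOMIC because sleeping is no longer allowed under the spinlock. A small userspace sketch of the caller-holds-the-lock pattern, using pthreads and a plain flag standing in for lockdep:

#include <assert.h>
#include <pthread.h>
#include <stdio.h>

/* Hypothetical table protected by one lock held across the whole update. */
static pthread_mutex_t bss_lock = PTHREAD_MUTEX_INITIALIZER;
static int bss_lock_held;		/* stand-in for lockdep_assert_held() */
static int entries[4];

/* Callee no longer takes the lock itself; it asserts the caller did. */
static void update_one(int idx, int val)
{
	assert(bss_lock_held);		/* like lockdep_assert_held(&rdev->bss_lock) */
	entries[idx] = val;
}

int main(void)
{
	pthread_mutex_lock(&bss_lock);
	bss_lock_held = 1;
	for (int i = 0; i < 4; i++)	/* whole walk done in one critical section */
		update_one(i, i * 10);
	bss_lock_held = 0;
	pthread_mutex_unlock(&bss_lock);

	printf("entries[3] = %d\n", entries[3]);
	return 0;
}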
index 419eb12c1e932d17f072b75288a7726c8b96da52..5b4ed5bbc542555a4ebd4e8fcfbb6dccf10e9f3f 100644 (file)
@@ -1559,7 +1559,8 @@ bool ieee80211_chandef_to_operating_class(struct cfg80211_chan_def *chandef,
        }
 
        if (freq == 2484) {
-               if (chandef->width > NL80211_CHAN_WIDTH_40)
+               /* channel 14 is only for IEEE 802.11b */
+               if (chandef->width != NL80211_CHAN_WIDTH_20_NOHT)
                        return false;
 
                *op_class = 82; /* channel 14 */
index 7b6529d81c61e4e58f45c654740f26deccbeae19..cac9e28d852b3a135d5cdcdfee0faaee76cb1918 100644 (file)
@@ -798,7 +798,7 @@ static int cfg80211_wext_giwfreq(struct net_device *dev,
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
        struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
-       struct cfg80211_chan_def chandef;
+       struct cfg80211_chan_def chandef = {};
        int ret;
 
        switch (wdev->iftype) {
index c67d7a82ab132a50be6305f69df6576fd158a15e..73fd0eae08caa960eef0b72211c6fc2485cbfb43 100644 (file)
@@ -202,6 +202,7 @@ int cfg80211_mgd_wext_giwessid(struct net_device *dev,
                               struct iw_point *data, char *ssid)
 {
        struct wireless_dev *wdev = dev->ieee80211_ptr;
+       int ret = 0;
 
        /* call only for station! */
        if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION))
@@ -219,7 +220,10 @@ int cfg80211_mgd_wext_giwessid(struct net_device *dev,
                if (ie) {
                        data->flags = 1;
                        data->length = ie[1];
-                       memcpy(ssid, ie + 2, data->length);
+                       if (data->length > IW_ESSID_MAX_SIZE)
+                               ret = -EINVAL;
+                       else
+                               memcpy(ssid, ie + 2, data->length);
                }
                rcu_read_unlock();
        } else if (wdev->wext.connect.ssid && wdev->wext.connect.ssid_len) {
@@ -229,7 +233,7 @@ int cfg80211_mgd_wext_giwessid(struct net_device *dev,
        }
        wdev_unlock(wdev);
 
-       return 0;
+       return ret;
 }
 
 int cfg80211_mgd_wext_siwap(struct net_device *dev,
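The giwessid change validates a peer-supplied length byte before copying into the caller's fixed-size SSID buffer. A self-contained sketch of the same check, with ESSID_MAX and copy_ssid() as hypothetical stand-ins for IW_ESSID_MAX_SIZE and the wext handler:

#include <stdio.h>
#include <string.h>

#define ESSID_MAX 32	/* stand-in for IW_ESSID_MAX_SIZE */

/* Copy an SSID out of a (length, bytes...) element, refusing oversized input. */
static int copy_ssid(const unsigned char *ie, char *out, size_t outlen)
{
	size_t len = ie[0];	/* peer-controlled length byte */

	if (len > outlen)
		return -1;	/* like returning -EINVAL instead of overflowing */
	memcpy(out, ie + 1, len);
	return (int)len;
}

int main(void)
{
	unsigned char good[] = { 4, 'h', 'o', 'm', 'e' };
	unsigned char bad[]  = { 200 };	/* claims 200 bytes it does not carry */
	char ssid[ESSID_MAX];

	printf("good: %d\n", copy_ssid(good, ssid, sizeof(ssid)));
	printf("bad:  %d\n", copy_ssid(bad, ssid, sizeof(ssid)));
	return 0;
}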
index 5c111bc3c8ea5a8c079cd0d83733e7b253d47158..00e782335cb0740daa172f0c1835bcc3285ba46b 100644 (file)
@@ -55,7 +55,7 @@ static int x25_receive_data(struct sk_buff *skb, struct x25_neigh *nb)
                if (!sock_owned_by_user(sk)) {
                        queued = x25_process_rx_frame(sk, skb);
                } else {
-                       queued = !sk_add_backlog(sk, skb, sk->sk_rcvbuf);
+                       queued = !sk_add_backlog(sk, skb, READ_ONCE(sk->sk_rcvbuf));
                }
                bh_unlock_sock(sk);
                sock_put(sk);
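The x25 hunk wraps sk->sk_rcvbuf in READ_ONCE() because the field can be rewritten concurrently without the socket lock held. A userspace approximation of what READ_ONCE() buys; the macro below mimics the kernel's volatile-cast definition and struct sock_lite is invented for the example:

#include <stdio.h>

/* Minimal stand-in for the kernel's READ_ONCE(): force a single load the
 * compiler cannot re-read, tear, or fold away. */
#define READ_ONCE(x) (*(const volatile __typeof__(x) *)&(x))

struct sock_lite {
	int sk_rcvbuf;		/* may be resized concurrently by setsockopt() */
	int sk_backlog_len;
};

static int backlog_has_room(struct sock_lite *sk)
{
	/* Snapshot the limit once; every later comparison uses the same value. */
	int limit = READ_ONCE(sk->sk_rcvbuf);

	return sk->sk_backlog_len < limit;
}

int main(void)
{
	struct sock_lite sk = { .sk_rcvbuf = 65536, .sk_backlog_len = 1024 };

	printf("room: %d\n", backlog_has_room(&sk));
	return 0;
}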
index 16d5f353163a166dd4f29a8ae2b99aea63629c22..3049af269fbf6b3fc3fc0f8d40651ba39d187ed8 100644 (file)
@@ -27,6 +27,9 @@ void xdp_add_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs)
 {
        unsigned long flags;
 
+       if (!xs->tx)
+               return;
+
        spin_lock_irqsave(&umem->xsk_list_lock, flags);
        list_add_rcu(&xs->list, &umem->xsk_list);
        spin_unlock_irqrestore(&umem->xsk_list_lock, flags);
@@ -36,6 +39,9 @@ void xdp_del_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs)
 {
        unsigned long flags;
 
+       if (!xs->tx)
+               return;
+
        spin_lock_irqsave(&umem->xsk_list_lock, flags);
        list_del_rcu(&xs->list);
        spin_unlock_irqrestore(&umem->xsk_list_lock, flags);
index fa8fbb8fa3c823aff9cb06d25f4335b662cc93c2..9044073fbf2257b0eb7632f05bc3b109d78604d8 100644 (file)
@@ -305,9 +305,8 @@ out:
 }
 EXPORT_SYMBOL(xsk_umem_consume_tx);
 
-static int xsk_zc_xmit(struct sock *sk)
+static int xsk_zc_xmit(struct xdp_sock *xs)
 {
-       struct xdp_sock *xs = xdp_sk(sk);
        struct net_device *dev = xs->dev;
 
        return dev->netdev_ops->ndo_xsk_wakeup(dev, xs->queue_id,
@@ -327,11 +326,10 @@ static void xsk_destruct_skb(struct sk_buff *skb)
        sock_wfree(skb);
 }
 
-static int xsk_generic_xmit(struct sock *sk, struct msghdr *m,
-                           size_t total_len)
+static int xsk_generic_xmit(struct sock *sk)
 {
-       u32 max_batch = TX_BATCH_SIZE;
        struct xdp_sock *xs = xdp_sk(sk);
+       u32 max_batch = TX_BATCH_SIZE;
        bool sent_frame = false;
        struct xdp_desc desc;
        struct sk_buff *skb;
@@ -394,6 +392,18 @@ out:
        return err;
 }
 
+static int __xsk_sendmsg(struct sock *sk)
+{
+       struct xdp_sock *xs = xdp_sk(sk);
+
+       if (unlikely(!(xs->dev->flags & IFF_UP)))
+               return -ENETDOWN;
+       if (unlikely(!xs->tx))
+               return -ENOBUFS;
+
+       return xs->zc ? xsk_zc_xmit(xs) : xsk_generic_xmit(sk);
+}
+
 static int xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
 {
        bool need_wait = !(m->msg_flags & MSG_DONTWAIT);
@@ -402,21 +412,18 @@ static int xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
 
        if (unlikely(!xsk_is_bound(xs)))
                return -ENXIO;
-       if (unlikely(!(xs->dev->flags & IFF_UP)))
-               return -ENETDOWN;
-       if (unlikely(!xs->tx))
-               return -ENOBUFS;
-       if (need_wait)
+       if (unlikely(need_wait))
                return -EOPNOTSUPP;
 
-       return (xs->zc) ? xsk_zc_xmit(sk) : xsk_generic_xmit(sk, m, total_len);
+       return __xsk_sendmsg(sk);
 }
 
 static unsigned int xsk_poll(struct file *file, struct socket *sock,
                             struct poll_table_struct *wait)
 {
        unsigned int mask = datagram_poll(file, sock, wait);
-       struct xdp_sock *xs = xdp_sk(sock->sk);
+       struct sock *sk = sock->sk;
+       struct xdp_sock *xs = xdp_sk(sk);
        struct net_device *dev;
        struct xdp_umem *umem;
 
@@ -426,9 +433,14 @@ static unsigned int xsk_poll(struct file *file, struct socket *sock,
        dev = xs->dev;
        umem = xs->umem;
 
-       if (umem->need_wakeup)
-               dev->netdev_ops->ndo_xsk_wakeup(dev, xs->queue_id,
-                                               umem->need_wakeup);
+       if (umem->need_wakeup) {
+               if (dev->netdev_ops->ndo_xsk_wakeup)
+                       dev->netdev_ops->ndo_xsk_wakeup(dev, xs->queue_id,
+                                                       umem->need_wakeup);
+               else
+                       /* Poll needs to drive Tx also in copy mode */
+                       __xsk_sendmsg(sk);
+       }
 
        if (xs->rx && !xskq_empty_desc(xs->rx))
                mask |= POLLIN | POLLRDNORM;
index 6088bc2dc11e3d4b72104873f1e22839346e8533..9b599ed66d97f5d638ce7667a1b2ef5effc54b2e 100644 (file)
@@ -706,7 +706,7 @@ resume:
        if (err)
                goto drop;
 
-       nf_reset(skb);
+       nf_reset_ct(skb);
 
        if (decaps) {
                sp = skb_sec_path(skb);
index 2ab4859df55ac08fec69b81a2a8f1cac6585195f..0f5131bc3342dd16929953c05209db5ad413dd79 100644 (file)
@@ -185,7 +185,7 @@ static void xfrmi_scrub_packet(struct sk_buff *skb, bool xnet)
        skb->skb_iif = 0;
        skb->ignore_df = 0;
        skb_dst_drop(skb);
-       nf_reset(skb);
+       nf_reset_ct(skb);
        nf_reset_trace(skb);
 
        if (!xnet)
index 9499b35feb922fee8c78a783f5cb0fc4fa99c75a..b1db55b50ba1644468718b07860cb114302c49dc 100644 (file)
@@ -502,7 +502,7 @@ int xfrm_output_resume(struct sk_buff *skb, int err)
        struct net *net = xs_net(skb_dst(skb)->xfrm);
 
        while (likely((err = xfrm_output_one(skb, err)) == 0)) {
-               nf_reset(skb);
+               nf_reset_ct(skb);
 
                err = skb_dst(skb)->ops->local_out(net, skb->sk, skb);
                if (unlikely(err != 1))
index 21e939235b3908ba3c363e2a570f41acd316173e..f2d1e573ea55154eb2ee4fc3dbdd47313d969b98 100644 (file)
@@ -2808,7 +2808,7 @@ static void xfrm_policy_queue_process(struct timer_list *t)
                        continue;
                }
 
-               nf_reset(skb);
+               nf_reset_ct(skb);
                skb_dst_drop(skb);
                skb_dst_set(skb, dst);
 
index 7409722727ca16fcffa9c7bc5fe03f6a749cc4bf..7048bb3594d65be6d132d4103ee801fadf087b7e 100644 (file)
@@ -3,7 +3,8 @@
 #ifndef __ASM_GOTO_WORKAROUND_H
 #define __ASM_GOTO_WORKAROUND_H
 
-/* this will bring in asm_volatile_goto macro definition
+/*
+ * This will bring in asm_volatile_goto and asm_inline macro definitions
  * if enabled by compiler and config options.
  */
 #include <linux/types.h>
 #define asm_volatile_goto(x...) asm volatile("invalid use of asm_volatile_goto")
 #endif
 
+/*
+ * asm_inline is defined as asm __inline in "include/linux/compiler_types.h"
+ * if supported by the kernel's CC (i.e. CONFIG_CC_HAS_ASM_INLINE), which is
+ * not supported by Clang.
+ */
+#ifdef asm_inline
+#undef asm_inline
+#define asm_inline asm
+#endif
+
 #define volatile(x...) volatile("")
 #endif
index e3993805822355a544aa5af4b7f44804e27a965b..4c31b305e6efc08448fd7c4a63ce1a36e7fbee1d 100644 (file)
@@ -13,6 +13,7 @@
 #include <sys/resource.h>
 #include <sys/types.h>
 #include <sys/stat.h>
+#include <linux/perf_event.h>
 
 #include "libbpf.h"
 #include "bpf_load.h"
index 4b0432e095ae03fab58064d87b85dc9387721250..10ba926ae29245976c7f716e3b17f6c5e56e989b 100644 (file)
@@ -143,11 +143,6 @@ cc-ifversion = $(shell [ $(CONFIG_GCC_VERSION)0 $(1) $(2)000 ] && echo $(3) || e
 # Usage: KBUILD_LDFLAGS += $(call ld-option, -X, -Y)
 ld-option = $(call try-run, $(LD) $(KBUILD_LDFLAGS) $(1) -v,$(1),$(2),$(3))
 
-# ar-option
-# Usage: KBUILD_ARFLAGS := $(call ar-option,D)
-# Important: no spaces around options
-ar-option = $(call try-run, $(AR) rc$(1) "$$TMP",$(1),$(2))
-
 # ld-version
 # Note this is mainly for HJ Lu's 3 number binutil versions
 ld-version = $(shell $(LD) --version | $(srctree)/scripts/ld-version.sh)
index f72aba64d611d287af477e5ab6348e055140792f..a9e47953ca53dfb06ba4e1221c7467539b4acc3b 100644 (file)
@@ -389,7 +389,7 @@ $(sort $(subdir-obj-y)): $(subdir-ym) ;
 ifdef builtin-target
 
 quiet_cmd_ar_builtin = AR      $@
-      cmd_ar_builtin = rm -f $@; $(AR) rcSTP$(KBUILD_ARFLAGS) $@ $(real-prereqs)
+      cmd_ar_builtin = rm -f $@; $(AR) cDPrST $@ $(real-prereqs)
 
 $(builtin-target): $(real-obj-y) FORCE
        $(call if_changed,ar_builtin)
index 4a0cdd6f5909a72912b9cb8a2b1692d2ee5a61cb..179d55af58529c911228b59562a86a5a5dd29af1 100644 (file)
@@ -232,7 +232,7 @@ quiet_cmd_ld = LD      $@
 # ---------------------------------------------------------------------------
 
 quiet_cmd_ar = AR      $@
-      cmd_ar = rm -f $@; $(AR) rcsTP$(KBUILD_ARFLAGS) $@ $(real-prereqs)
+      cmd_ar = rm -f $@; $(AR) cDPrsT $@ $(real-prereqs)
 
 # Objcopy
 # ---------------------------------------------------------------------------
diff --git a/scripts/coccinelle/api/devm_platform_ioremap_resource.cocci b/scripts/coccinelle/api/devm_platform_ioremap_resource.cocci
deleted file mode 100644 (file)
index 56a2e26..0000000
+++ /dev/null
@@ -1,60 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/// Use devm_platform_ioremap_resource helper which wraps
-/// platform_get_resource() and devm_ioremap_resource() together.
-///
-// Confidence: High
-// Copyright: (C) 2019 Himanshu Jha GPLv2.
-// Copyright: (C) 2019 Julia Lawall, Inria/LIP6. GPLv2.
-// Keywords: platform_get_resource, devm_ioremap_resource,
-// Keywords: devm_platform_ioremap_resource
-
-virtual patch
-virtual report
-
-@r depends on patch && !report@
-expression e1, e2, arg1, arg2, arg3;
-identifier id;
-@@
-
-(
-- id = platform_get_resource(arg1, IORESOURCE_MEM, arg2);
-|
-- struct resource *id = platform_get_resource(arg1, IORESOURCE_MEM, arg2);
-)
-  ... when != id
-- e1 = devm_ioremap_resource(arg3, id);
-+ e1 = devm_platform_ioremap_resource(arg1, arg2);
-  ... when != id
-? id = e2
-
-@r1 depends on patch && !report@
-identifier r.id;
-type T;
-@@
-
-- T *id;
-  ...when != id
-
-@r2 depends on report && !patch@
-identifier id;
-expression e1, e2, arg1, arg2, arg3;
-position j0;
-@@
-
-(
-  id = platform_get_resource(arg1, IORESOURCE_MEM, arg2);
-|
-  struct resource *id = platform_get_resource(arg1, IORESOURCE_MEM, arg2);
-)
-  ... when != id
-  e1@j0 = devm_ioremap_resource(arg3, id);
-  ... when != id
-? id = e2
-
-@script:python depends on report && !patch@
-e1 << r2.e1;
-j0 << r2.j0;
-@@
-
-msg = "WARNING: Use devm_platform_ioremap_resource for %s" % (e1)
-coccilib.report.print_report(j0[0], msg)
index c832bb6445a832e28ed0f8ed03beac72b6c3941c..99e93a6c2e240c3ba0a95ad5f214c236c97de470 100644 (file)
@@ -6,6 +6,8 @@
 /// add a missing namespace tag to a module source file.
 ///
 
+virtual report
+
 @has_ns_import@
 declarer name MODULE_IMPORT_NS;
 identifier virtual.ns;
index 6d2e09a2ad2f9204dbdb7c5c84522c45530e1342..2fa7bb83885f068e05648448267b3f590bfb2ab9 100644 (file)
@@ -16,6 +16,8 @@ import sys
 
 from linux import utils
 
+printk_log_type = utils.CachedType("struct printk_log")
+
 
 class LxDmesg(gdb.Command):
     """Print Linux kernel log buffer."""
@@ -42,9 +44,14 @@ class LxDmesg(gdb.Command):
             b = utils.read_memoryview(inf, log_buf_addr, log_next_idx)
             log_buf = a.tobytes() + b.tobytes()
 
+        length_offset = printk_log_type.get_type()['len'].bitpos // 8
+        text_len_offset = printk_log_type.get_type()['text_len'].bitpos // 8
+        time_stamp_offset = printk_log_type.get_type()['ts_nsec'].bitpos // 8
+        text_offset = printk_log_type.get_type().sizeof
+
         pos = 0
         while pos < log_buf.__len__():
-            length = utils.read_u16(log_buf[pos + 8:pos + 10])
+            length = utils.read_u16(log_buf, pos + length_offset)
             if length == 0:
                 if log_buf_2nd_half == -1:
                     gdb.write("Corrupted log buffer!\n")
@@ -52,10 +59,11 @@ class LxDmesg(gdb.Command):
                 pos = log_buf_2nd_half
                 continue
 
-            text_len = utils.read_u16(log_buf[pos + 10:pos + 12])
-            text = log_buf[pos + 16:pos + 16 + text_len].decode(
+            text_len = utils.read_u16(log_buf, pos + text_len_offset)
+            text_start = pos + text_offset
+            text = log_buf[text_start:text_start + text_len].decode(
                 encoding='utf8', errors='replace')
-            time_stamp = utils.read_u64(log_buf[pos:pos + 8])
+            time_stamp = utils.read_u64(log_buf, pos + time_stamp_offset)
 
             for line in text.splitlines():
                 msg = u"[{time:12.6f}] {line}\n".format(
index 34e40e96dee2d7f7c29014d9edbf949b5ba1d0e5..be984aa29b759cb1e42bb2c8d6e42eb4b061fb9c 100644 (file)
@@ -15,7 +15,7 @@ import gdb
 import os
 import re
 
-from linux import modules
+from linux import modules, utils
 
 
 if hasattr(gdb, 'Breakpoint'):
@@ -99,7 +99,8 @@ lx-symbols command."""
             attrs[n]['name'].string(): attrs[n]['address']
             for n in range(int(sect_attrs['nsections']))}
         args = []
-        for section_name in [".data", ".data..read_mostly", ".rodata", ".bss"]:
+        for section_name in [".data", ".data..read_mostly", ".rodata", ".bss",
+                             ".text", ".text.hot", ".text.unlikely"]:
             address = section_name_to_address.get(section_name)
             if address:
                 args.append(" -s {name} {addr}".format(
@@ -116,6 +117,12 @@ lx-symbols command."""
             module_file = self._get_module_file(module_name)
 
         if module_file:
+            if utils.is_target_arch('s390'):
+                # Module text is preceded by PLT stubs on s390.
+                module_arch = module['arch']
+                plt_offset = int(module_arch['plt_offset'])
+                plt_size = int(module_arch['plt_size'])
+                module_addr = hex(int(module_addr, 0) + plt_offset + plt_size)
             gdb.write("loading @{addr}: {filename}\n".format(
                 addr=module_addr, filename=module_file))
             cmdline = "add-symbol-file {filename} {addr}{sections}".format(
index bc67126118c4d50ff1b6d567525eb9b40090e54d..ea94221dbd392733b873129b7c9060ef9da5b9a4 100644 (file)
@@ -92,15 +92,16 @@ def read_memoryview(inf, start, length):
     return memoryview(inf.read_memory(start, length))
 
 
-def read_u16(buffer):
+def read_u16(buffer, offset):
+    buffer_val = buffer[offset:offset + 2]
     value = [0, 0]
 
-    if type(buffer[0]) is str:
-        value[0] = ord(buffer[0])
-        value[1] = ord(buffer[1])
+    if type(buffer_val[0]) is str:
+        value[0] = ord(buffer_val[0])
+        value[1] = ord(buffer_val[1])
     else:
-        value[0] = buffer[0]
-        value[1] = buffer[1]
+        value[0] = buffer_val[0]
+        value[1] = buffer_val[1]
 
     if get_target_endianness() == LITTLE_ENDIAN:
         return value[0] + (value[1] << 8)
@@ -108,18 +109,18 @@ def read_u16(buffer):
         return value[1] + (value[0] << 8)
 
 
-def read_u32(buffer):
+def read_u32(buffer, offset):
     if get_target_endianness() == LITTLE_ENDIAN:
-        return read_u16(buffer[0:2]) + (read_u16(buffer[2:4]) << 16)
+        return read_u16(buffer, offset) + (read_u16(buffer, offset + 2) << 16)
     else:
-        return read_u16(buffer[2:4]) + (read_u16(buffer[0:2]) << 16)
+        return read_u16(buffer, offset + 2) + (read_u16(buffer, offset) << 16)
 
 
-def read_u64(buffer):
+def read_u64(buffer, offset):
     if get_target_endianness() == LITTLE_ENDIAN:
-        return read_u32(buffer[0:4]) + (read_u32(buffer[4:8]) << 32)
+        return read_u32(buffer, offset) + (read_u32(buffer, offset + 4) << 32)
     else:
-        return read_u32(buffer[4:8]) + (read_u32(buffer[0:4]) << 32)
+        return read_u32(buffer, offset + 4) + (read_u32(buffer, offset) << 32)
 
 
 target_arch = None
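The utils.py change makes read_u16/u32/u64 take a (buffer, offset) pair instead of pre-sliced buffers, composing the wider reads from byte reads with the target endianness. The same composition written as a small C sketch; read_u16/read_u32 here are illustrative helpers, not the GDB ones:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Compose wider reads from byte reads so callers never slice or copy. */
static uint16_t read_u16(const uint8_t *buf, size_t off, int little_endian)
{
	return little_endian ? (uint16_t)(buf[off] | buf[off + 1] << 8)
			     : (uint16_t)(buf[off + 1] | buf[off] << 8);
}

static uint32_t read_u32(const uint8_t *buf, size_t off, int le)
{
	return le ? read_u16(buf, off, le) | (uint32_t)read_u16(buf, off + 2, le) << 16
		  : read_u16(buf, off + 2, le) | (uint32_t)read_u16(buf, off, le) << 16;
}

int main(void)
{
	const uint8_t log[] = { 0x34, 0x12, 0x78, 0x56 };

	printf("u16 @0 LE = 0x%x\n", read_u16(log, 0, 1));	/* 0x1234 */
	printf("u32 @0 LE = 0x%x\n", read_u32(log, 0, 1));	/* 0x56781234 */
	return 0;
}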
index 3961941e8e7a9c95062bde8983c88efd001dba6d..d2a30a7b3f07cc884889555948054acb911e1fc7 100644 (file)
@@ -166,7 +166,7 @@ struct symbol {
        struct module *module;
        unsigned int crc;
        int crc_valid;
-       const char *namespace;
+       char *namespace;
        unsigned int weak:1;
        unsigned int vmlinux:1;    /* 1 if symbol is defined in vmlinux */
        unsigned int kernel:1;     /* 1 if symbol is from kernel
@@ -348,34 +348,43 @@ static enum export export_from_sec(struct elf_info *elf, unsigned int sec)
                return export_unknown;
 }
 
-static const char *sym_extract_namespace(const char **symname)
+static const char *namespace_from_kstrtabns(struct elf_info *info,
+                                           Elf_Sym *kstrtabns)
+{
+       char *value = info->ksymtab_strings + kstrtabns->st_value;
+       return value[0] ? value : NULL;
+}
+
+static void sym_update_namespace(const char *symname, const char *namespace)
 {
-       size_t n;
-       char *dupsymname;
+       struct symbol *s = find_symbol(symname);
 
-       n = strcspn(*symname, ".");
-       if (n < strlen(*symname) - 1) {
-               dupsymname = NOFAIL(strdup(*symname));
-               dupsymname[n] = '\0';
-               *symname = dupsymname;
-               return dupsymname + n + 1;
+       /*
+        * That symbol should have been created earlier and thus this is
+        * actually an assertion.
+        */
+       if (!s) {
+               merror("Could not update namespace(%s) for symbol %s\n",
+                      namespace, symname);
+               return;
        }
 
-       return NULL;
+       free(s->namespace);
+       s->namespace =
+               namespace && namespace[0] ? NOFAIL(strdup(namespace)) : NULL;
 }
 
 /**
  * Add an exported symbol - it may have already been added without a
  * CRC, in this case just update the CRC
  **/
-static struct symbol *sym_add_exported(const char *name, const char *namespace,
-                                      struct module *mod, enum export export)
+static struct symbol *sym_add_exported(const char *name, struct module *mod,
+                                      enum export export)
 {
        struct symbol *s = find_symbol(name);
 
        if (!s) {
                s = new_symbol(name, mod, export);
-               s->namespace = namespace;
        } else {
                if (!s->preloaded) {
                        warn("%s: '%s' exported twice. Previous export was in %s%s\n",
@@ -584,6 +593,10 @@ static int parse_elf(struct elf_info *info, const char *filename)
                        info->export_unused_gpl_sec = i;
                else if (strcmp(secname, "__ksymtab_gpl_future") == 0)
                        info->export_gpl_future_sec = i;
+               else if (strcmp(secname, "__ksymtab_strings") == 0)
+                       info->ksymtab_strings = (void *)hdr +
+                                               sechdrs[i].sh_offset -
+                                               sechdrs[i].sh_addr;
 
                if (sechdrs[i].sh_type == SHT_SYMTAB) {
                        unsigned int sh_link_idx;
@@ -672,7 +685,7 @@ static void handle_modversions(struct module *mod, struct elf_info *info,
        unsigned int crc;
        enum export export;
        bool is_crc = false;
-       const char *name, *namespace;
+       const char *name;
 
        if ((!is_vmlinux(mod->name) || mod->is_dot_o) &&
            strstarts(symname, "__ksymtab"))
@@ -745,8 +758,7 @@ static void handle_modversions(struct module *mod, struct elf_info *info,
                /* All exported symbols */
                if (strstarts(symname, "__ksymtab_")) {
                        name = symname + strlen("__ksymtab_");
-                       namespace = sym_extract_namespace(&name);
-                       sym_add_exported(name, namespace, mod, export);
+                       sym_add_exported(name, mod, export);
                }
                if (strcmp(symname, "init_module") == 0)
                        mod->has_init = 1;
@@ -2042,6 +2054,16 @@ static void read_symbols(const char *modname)
                handle_moddevtable(mod, &info, sym, symname);
        }
 
+       /* Apply symbol namespaces from __kstrtabns_<symbol> entries. */
+       for (sym = info.symtab_start; sym < info.symtab_stop; sym++) {
+               symname = remove_dot(info.strtab + sym->st_name);
+
+               if (strstarts(symname, "__kstrtabns_"))
+                       sym_update_namespace(symname + strlen("__kstrtabns_"),
+                                            namespace_from_kstrtabns(&info,
+                                                                     sym));
+       }
+
        // check for static EXPORT_SYMBOL_* functions && global vars
        for (sym = info.symtab_start; sym < info.symtab_stop; sym++) {
                unsigned char bind = ELF_ST_BIND(sym->st_info);
@@ -2453,12 +2475,12 @@ static void read_dump(const char *fname, unsigned int kernel)
                        mod = new_module(modname);
                        mod->skip = 1;
                }
-               s = sym_add_exported(symname, namespace, mod,
-                                    export_no(export));
+               s = sym_add_exported(symname, mod, export_no(export));
                s->kernel    = kernel;
                s->preloaded = 1;
                s->is_static = 0;
                sym_update_crc(symname, mod, crc, export_no(export));
+               sym_update_namespace(symname, namespace);
        }
        release_file(file, size);
        return;
@@ -2652,15 +2674,20 @@ int main(int argc, char **argv)
                fatal("modpost: Section mismatches detected.\n"
                      "Set CONFIG_SECTION_MISMATCH_WARN_ONLY=y to allow them.\n");
        for (n = 0; n < SYMBOL_HASH_SIZE; n++) {
-               struct symbol *s = symbolhash[n];
+               struct symbol *s;
+
+               for (s = symbolhash[n]; s; s = s->next) {
+                       /*
+                        * Do not check "vmlinux". This avoids showing the same
+                        * warnings twice, and false positives for ARCH=um.
+                        */
+                       if (is_vmlinux(s->module->name) && !s->module->is_dot_o)
+                               continue;
 
-               while (s) {
                        if (s->is_static)
                                warn("\"%s\" [%s] is a static %s\n",
                                     s->name, s->module->name,
                                     export_str(s->export));
-
-                       s = s->next;
                }
        }
 
index 92a926d375d287c0629facef7de45cee938aa99c..ad271bc6c3130410ab9f515b75d1efea26f89b97 100644 (file)
@@ -143,6 +143,7 @@ struct elf_info {
        Elf_Section  export_gpl_sec;
        Elf_Section  export_unused_gpl_sec;
        Elf_Section  export_gpl_future_sec;
+       char         *ksymtab_strings;
        char         *strtab;
        char         *modinfo;
        unsigned int modinfo_len;
index 6135574a6f3947c37beab1b24ead86d933eda712..1da7bca201a42453fda2a6a09f5d0356a31047cf 100755 (executable)
 use warnings;
 use strict;
 use File::Find;
+use File::Spec;
 
 my $nm = ($ENV{'NM'} || "nm") . " -p";
 my $objdump = ($ENV{'OBJDUMP'} || "objdump") . " -s -j .comment";
-my $srctree = "";
-my $objtree = "";
-$srctree = "$ENV{'srctree'}/" if (exists($ENV{'srctree'}));
-$objtree = "$ENV{'objtree'}/" if (exists($ENV{'objtree'}));
+my $srctree = File::Spec->curdir();
+my $objtree = File::Spec->curdir();
+$srctree = File::Spec->rel2abs($ENV{'srctree'}) if (exists($ENV{'srctree'}));
+$objtree = File::Spec->rel2abs($ENV{'objtree'}) if (exists($ENV{'objtree'}));
 
 if ($#ARGV != -1) {
        print STDERR "usage: $0 takes no parameters\n";
@@ -231,9 +232,9 @@ sub do_nm
        }
        ($source = $basename) =~ s/\.o$//;
        if (-e "$source.c" || -e "$source.S") {
-               $source = "$objtree$File::Find::dir/$source";
+               $source = File::Spec->catfile($objtree, $File::Find::dir, $source)
        } else {
-               $source = "$srctree$File::Find::dir/$source";
+               $source = File::Spec->catfile($srctree, $File::Find::dir, $source)
        }
        if (! -e "$source.c" && ! -e "$source.S") {
                # No obvious source, exclude the object if it is conglomerate
index ac2b6031dd1393864d4ec582d119282a2ce1fdf7..dda6fbac016e413983e18982c1ca4b32e79f3c47 100644 (file)
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/bin/sh
 # SPDX-License-Identifier: GPL-2.0
 # Linux kernel symbol namespace import generator
 #
@@ -33,7 +33,7 @@ generate_deps() {
        if [ ! -f "$ns_deps_file" ]; then return; fi
        local mod_source_files=`cat $mod_file | sed -n 1p                      \
                                              | sed -e 's/\.o/\.c/g'           \
-                                             | sed "s/[^ ]* */${srctree}\/&/g"`
+                                             | sed "s|[^ ]* *|${srctree}/&|g"`
        for ns in `cat $ns_deps_file`; do
                echo "Adding namespace $ns to module $mod_name (if needed)."
                generate_deps_for_ns $ns $mod_source_files
@@ -41,7 +41,7 @@ generate_deps() {
                for source_file in $mod_source_files; do
                        sed '/MODULE_IMPORT_NS/Q' $source_file > ${source_file}.tmp
                        offset=$(wc -l ${source_file}.tmp | awk '{print $1;}')
-                       cat $source_file | grep MODULE_IMPORT_NS | sort -u >> ${source_file}.tmp
+                       cat $source_file | grep MODULE_IMPORT_NS | LANG=C sort -u >> ${source_file}.tmp
                        tail -n +$((offset +1)) ${source_file} | grep -v MODULE_IMPORT_NS >> ${source_file}.tmp
                        if ! diff -q ${source_file} ${source_file}.tmp; then
                                mv ${source_file}.tmp ${source_file}
index 8f0a278ce0af2e985e3fd80047119c93851bba87..74eab03e31d4dc2f0fe90033da7fd84166c0f0df 100644 (file)
@@ -389,11 +389,8 @@ static int nop_mcount(Elf_Shdr const *const relhdr,
                        mcountsym = get_mcountsym(sym0, relp, str0);
 
                if (mcountsym == Elf_r_sym(relp) && !is_fake_mcount(relp)) {
-                       if (make_nop) {
+                       if (make_nop)
                                ret = make_nop((void *)ehdr, _w(shdr->sh_offset) + _w(relp->r_offset));
-                               if (ret < 0)
-                                       return -1;
-                       }
                        if (warn_on_notrace_sect && !once) {
                                printf("Section %s has mcount callers being ignored\n",
                                       txtname);
index 365b3c2b8f431f28a83a6641ccf2da043dc05b55..a2998b118ef9ed68ae1ffe8be1d4609beef806d5 100755 (executable)
@@ -93,7 +93,7 @@ scm_version()
        # Check for mercurial and a mercurial repo.
        if test -d .hg && hgid=`hg id 2>/dev/null`; then
        # Do we have a tagged version?  If so, latesttagdistance == 1
-               if [ "`hg log -r . --template '{latesttagdistance}'`" == "1" ]; then
+               if [ "`hg log -r . --template '{latesttagdistance}'`" = "1" ]; then
                        id=`hg log -r . --template '{latesttag}'`
                        printf '%s%s' -hg "$id"
                else
@@ -126,7 +126,7 @@ scm_version()
 
 collect_files()
 {
-       local file res
+       local file res=
 
        for file; do
                case "$file" in
index 19faace6964416f4d87062c4b0e6d5e1c3cc4e1b..35e6ca773734605ec07d793cc5ac6f6b344e9ab4 100644 (file)
@@ -13,9 +13,6 @@ integrity-$(CONFIG_INTEGRITY_PLATFORM_KEYRING) += platform_certs/platform_keyrin
 integrity-$(CONFIG_LOAD_UEFI_KEYS) += platform_certs/efi_parser.o \
                                        platform_certs/load_uefi.o
 integrity-$(CONFIG_LOAD_IPL_KEYS) += platform_certs/load_ipl_s390.o
-$(obj)/load_uefi.o: KBUILD_CFLAGS += -fshort-wchar
 
-subdir-$(CONFIG_IMA)                   += ima
 obj-$(CONFIG_IMA)                      += ima/
-subdir-$(CONFIG_EVM)                   += evm
 obj-$(CONFIG_EVM)                      += evm/
index 8a10b43daf74e5c3b86a06e19b4856b684dacc66..40b790536defaf8dcdad0655d4423f33c94dcd06 100644 (file)
@@ -20,6 +20,7 @@ static const char *const lockdown_reasons[LOCKDOWN_CONFIDENTIALITY_MAX+1] = {
        [LOCKDOWN_NONE] = "none",
        [LOCKDOWN_MODULE_SIGNATURE] = "unsigned module loading",
        [LOCKDOWN_DEV_MEM] = "/dev/mem,kmem,port",
+       [LOCKDOWN_EFI_TEST] = "/dev/efi_test access",
        [LOCKDOWN_KEXEC] = "kexec of unsigned images",
        [LOCKDOWN_HIBERNATION] = "hibernation",
        [LOCKDOWN_PCI_ACCESS] = "direct PCI access",
index 3a29e7c24ba936f38a934e78dc9893c71e97eeeb..a5813c7629c17f229f6e016cd3d5e0db86f975f7 100644 (file)
@@ -1946,7 +1946,14 @@ static int convert_context(struct context *oldc, struct context *newc, void *p)
                rc = string_to_context_struct(args->newp, NULL, s,
                                              newc, SECSID_NULL);
                if (rc == -EINVAL) {
-                       /* Retain string representation for later mapping. */
+                       /*
+                        * Retain string representation for later mapping.
+                        *
+                        * IMPORTANT: We need to copy the contents of oldc->str
+                        * back into s again because string_to_context_struct()
+                        * may have garbled it.
+                        */
+                       memcpy(s, oldc->str, oldc->len);
                        context_init(newc);
                        newc->str = s;
                        newc->len = oldc->len;
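The services.c fix re-copies the original context string into the scratch buffer because string_to_context_struct() tokenises it in place and may have garbled it before returning -EINVAL. A tiny sketch of restoring a buffer after a destructive parse, with parse_context() as a made-up stand-in:

#include <stdio.h>
#include <string.h>

/* Destructive parser: chops the string at ':' and then reports failure,
 * as if the type it found does not exist in the new policy. */
static int parse_context(char *s)
{
	char *p = strchr(s, ':');

	if (p)
		*p = '\0';		/* in-place tokenising garbles the buffer */
	return -1;			/* stands in for -EINVAL */
}

int main(void)
{
	const char orig[] = "user_u:type_missing_in_new_policy";
	char scratch[sizeof(orig)];

	memcpy(scratch, orig, sizeof(orig));
	if (parse_context(scratch) == -1)
		memcpy(scratch, orig, sizeof(orig));	/* undo the garbling */
	printf("retained string: %s\n", scratch);
	return 0;
}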
index 5c9fbf3f43407a33aeb9dcbb338d6b98c9f18dc3..6b724d2ee2de9b0de0106bf5d2586e8f77677cd7 100644 (file)
@@ -226,7 +226,8 @@ static int snd_timer_check_master(struct snd_timer_instance *master)
        return 0;
 }
 
-static int snd_timer_close_locked(struct snd_timer_instance *timeri);
+static int snd_timer_close_locked(struct snd_timer_instance *timeri,
+                                 struct device **card_devp_to_put);
 
 /*
  * open a timer instance
@@ -238,6 +239,7 @@ int snd_timer_open(struct snd_timer_instance **ti,
 {
        struct snd_timer *timer;
        struct snd_timer_instance *timeri = NULL;
+       struct device *card_dev_to_put = NULL;
        int err;
 
        mutex_lock(&register_mutex);
@@ -261,7 +263,7 @@ int snd_timer_open(struct snd_timer_instance **ti,
                list_add_tail(&timeri->open_list, &snd_timer_slave_list);
                err = snd_timer_check_slave(timeri);
                if (err < 0) {
-                       snd_timer_close_locked(timeri);
+                       snd_timer_close_locked(timeri, &card_dev_to_put);
                        timeri = NULL;
                }
                goto unlock;
@@ -313,7 +315,7 @@ int snd_timer_open(struct snd_timer_instance **ti,
                        timeri = NULL;
 
                        if (timer->card)
-                               put_device(&timer->card->card_dev);
+                               card_dev_to_put = &timer->card->card_dev;
                        module_put(timer->module);
                        goto unlock;
                }
@@ -323,12 +325,15 @@ int snd_timer_open(struct snd_timer_instance **ti,
        timer->num_instances++;
        err = snd_timer_check_master(timeri);
        if (err < 0) {
-               snd_timer_close_locked(timeri);
+               snd_timer_close_locked(timeri, &card_dev_to_put);
                timeri = NULL;
        }
 
  unlock:
        mutex_unlock(&register_mutex);
+       /* put_device() is called after unlock to avoid a deadlock */
+       if (card_dev_to_put)
+               put_device(card_dev_to_put);
        *ti = timeri;
        return err;
 }
@@ -338,7 +343,8 @@ EXPORT_SYMBOL(snd_timer_open);
  * close a timer instance
  * call this with register_mutex down.
  */
-static int snd_timer_close_locked(struct snd_timer_instance *timeri)
+static int snd_timer_close_locked(struct snd_timer_instance *timeri,
+                                 struct device **card_devp_to_put)
 {
        struct snd_timer *timer = timeri->timer;
        struct snd_timer_instance *slave, *tmp;
@@ -395,7 +401,7 @@ static int snd_timer_close_locked(struct snd_timer_instance *timeri)
                        timer->hw.close(timer);
                /* release a card refcount for safe disconnection */
                if (timer->card)
-                       put_device(&timer->card->card_dev);
+                       *card_devp_to_put = &timer->card->card_dev;
                module_put(timer->module);
        }
 
@@ -407,14 +413,18 @@ static int snd_timer_close_locked(struct snd_timer_instance *timeri)
  */
 int snd_timer_close(struct snd_timer_instance *timeri)
 {
+       struct device *card_dev_to_put = NULL;
        int err;
 
        if (snd_BUG_ON(!timeri))
                return -ENXIO;
 
        mutex_lock(&register_mutex);
-       err = snd_timer_close_locked(timeri);
+       err = snd_timer_close_locked(timeri, &card_dev_to_put);
        mutex_unlock(&register_mutex);
+       /* put_device() is called after unlock to avoid a deadlock */
+       if (card_dev_to_put)
+               put_device(card_dev_to_put);
        return err;
 }
 EXPORT_SYMBOL(snd_timer_close);
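The timer.c change collects the card device to put while register_mutex is held and only calls put_device() after the mutex is dropped, since the final put can itself take locks and deadlock against the open/close path. A userspace sketch of the collect-then-release-after-unlock pattern, with struct card and registry_lock invented for the example:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t registry_lock = PTHREAD_MUTEX_INITIALIZER;

struct card { int refs; };

/* The final put may take other locks, so it must run unlocked. */
static void card_put(struct card *c)
{
	if (--c->refs == 0) {
		puts("last reference dropped outside the registry lock");
		free(c);
	}
}

static void close_timer(struct card *c)
{
	struct card *to_put = NULL;

	pthread_mutex_lock(&registry_lock);
	/* ... unlink the timer instance from the registry ... */
	to_put = c;			/* remember it instead of putting it here */
	pthread_mutex_unlock(&registry_lock);

	if (to_put)
		card_put(to_put);	/* safe: registry_lock is no longer held */
}

int main(void)
{
	struct card *c = malloc(sizeof(*c));

	c->refs = 1;
	close_timer(c);
	return 0;
}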
index 73fee991bd75a500d25f074d78d6c71843505881..6c1497d9f52bac0e62b0cfce5bb0e59c64c70d31 100644 (file)
@@ -252,8 +252,7 @@ end:
        return err;
 }
 
-static unsigned int
-map_data_channels(struct snd_bebob *bebob, struct amdtp_stream *s)
+static int map_data_channels(struct snd_bebob *bebob, struct amdtp_stream *s)
 {
        unsigned int sec, sections, ch, channels;
        unsigned int pcm, midi, location;
index 211ca85acd8c4dbbedb93d7fc1fea806b3e8ced6..cfab60d88c921c507b5a0121a8e8f44d177e4d06 100644 (file)
@@ -270,6 +270,11 @@ int snd_hdac_ext_bus_link_get(struct hdac_bus *bus,
 
                ret = snd_hdac_ext_bus_link_power_up(link);
 
+               /*
+                * clear the register to invalidate all the output streams
+                */
+               snd_hdac_updatew(link->ml_addr, AZX_REG_ML_LOSIDV,
+                                ML_LOSIDV_STREAM_MASK, 0);
                /*
                 *  wait for 521usec for codec to report status
                 *  HDA spec section 4.3 - Codec Discovery
index d3999e7b070542e8aeb3a435abf52f6f44b022a8..7e7be8e4dcf9c3d7dd170ad2cbea0b949ec7f9e1 100644 (file)
@@ -447,8 +447,6 @@ static void azx_int_disable(struct hdac_bus *bus)
        list_for_each_entry(azx_dev, &bus->stream_list, list)
                snd_hdac_stream_updateb(azx_dev, SD_CTL, SD_INT_MASK, 0);
 
-       synchronize_irq(bus->irq);
-
        /* disable SIE for all streams */
        snd_hdac_chip_writeb(bus, INTCTL, 0);
 
index 240f4ca76391fbefa80074a182dc9ab88f4394e9..cf53fbd872ee34526d277bd4f479c1a5024b32b6 100644 (file)
@@ -1348,9 +1348,9 @@ static int azx_free(struct azx *chip)
        }
 
        if (bus->chip_init) {
-               azx_stop_chip(chip);
                azx_clear_irq_pending(chip);
                azx_stop_all_streams(chip);
+               azx_stop_chip(chip);
        }
 
        if (bus->irq >= 0)
@@ -2399,6 +2399,12 @@ static const struct pci_device_id azx_ids[] = {
        /* Icelake */
        { PCI_DEVICE(0x8086, 0x34c8),
          .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
+       /* Jasperlake */
+       { PCI_DEVICE(0x8086, 0x38c8),
+         .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
+       /* Tigerlake */
+       { PCI_DEVICE(0x8086, 0xa0c8),
+         .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
        /* Elkhart Lake */
        { PCI_DEVICE(0x8086, 0x4b55),
          .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
index bca5de78e9ad577a08d885e1c519383797f4a3c9..b72553710ffbd91eaeaf503fe601eff9d3789258 100644 (file)
@@ -145,6 +145,7 @@ struct hdmi_spec {
        struct snd_array pins; /* struct hdmi_spec_per_pin */
        struct hdmi_pcm pcm_rec[16];
        struct mutex pcm_lock;
+       struct mutex bind_lock; /* for audio component binding */
        /* pcm_bitmap means which pcms have been assigned to pins*/
        unsigned long pcm_bitmap;
        int pcm_used;   /* counter of pcm_rec[] */
@@ -2258,7 +2259,7 @@ static int generic_hdmi_init(struct hda_codec *codec)
        struct hdmi_spec *spec = codec->spec;
        int pin_idx;
 
-       mutex_lock(&spec->pcm_lock);
+       mutex_lock(&spec->bind_lock);
        spec->use_jack_detect = !codec->jackpoll_interval;
        for (pin_idx = 0; pin_idx < spec->num_pins; pin_idx++) {
                struct hdmi_spec_per_pin *per_pin = get_pin(spec, pin_idx);
@@ -2275,7 +2276,7 @@ static int generic_hdmi_init(struct hda_codec *codec)
                        snd_hda_jack_detect_enable_callback(codec, pin_nid,
                                                            jack_callback);
        }
-       mutex_unlock(&spec->pcm_lock);
+       mutex_unlock(&spec->bind_lock);
        return 0;
 }
 
@@ -2382,6 +2383,7 @@ static int alloc_generic_hdmi(struct hda_codec *codec)
        spec->ops = generic_standard_hdmi_ops;
        spec->dev_num = 1;      /* initialize to 1 */
        mutex_init(&spec->pcm_lock);
+       mutex_init(&spec->bind_lock);
        snd_hdac_register_chmap_ops(&codec->core, &spec->chmap);
 
        spec->chmap.ops.get_chmap = hdmi_get_chmap;
@@ -2451,7 +2453,7 @@ static void generic_acomp_notifier_set(struct drm_audio_component *acomp,
        int i;
 
        spec = container_of(acomp->audio_ops, struct hdmi_spec, drm_audio_ops);
-       mutex_lock(&spec->pcm_lock);
+       mutex_lock(&spec->bind_lock);
        spec->use_acomp_notifier = use_acomp;
        spec->codec->relaxed_resume = use_acomp;
        /* reprogram each jack detection logic depending on the notifier */
@@ -2461,7 +2463,7 @@ static void generic_acomp_notifier_set(struct drm_audio_component *acomp,
                                              get_pin(spec, i)->pin_nid,
                                              use_acomp);
        }
-       mutex_unlock(&spec->pcm_lock);
+       mutex_unlock(&spec->bind_lock);
 }
 
 /* enable / disable the notifier via master bind / unbind */
@@ -3474,6 +3476,8 @@ static int patch_nvhdmi(struct hda_codec *codec)
                nvhdmi_chmap_cea_alloc_validate_get_type;
        spec->chmap.ops.chmap_validate = nvhdmi_chmap_validate;
 
+       codec->link_down_at_suspend = 1;
+
        generic_acomp_init(codec, &nvhdmi_audio_ops, nvhdmi_port2pin);
 
        return 0;
index b000b36ac3c627307381d7f7443e29133c077a81..80f66ba85f87664945777ef3fc7cf1abef4d63a6 100644 (file)
@@ -393,6 +393,7 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
        case 0x10ec0700:
        case 0x10ec0701:
        case 0x10ec0703:
+       case 0x10ec0711:
                alc_update_coef_idx(codec, 0x10, 1<<15, 0);
                break;
        case 0x10ec0662:
@@ -408,6 +409,9 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
        case 0x10ec0672:
                alc_update_coef_idx(codec, 0xd, 0, 1<<14); /* EAPD Ctrl */
                break;
+       case 0x10ec0623:
+               alc_update_coef_idx(codec, 0x19, 1<<13, 0);
+               break;
        case 0x10ec0668:
                alc_update_coef_idx(codec, 0x7, 3<<13, 0);
                break;
@@ -2919,6 +2923,7 @@ enum {
        ALC269_TYPE_ALC225,
        ALC269_TYPE_ALC294,
        ALC269_TYPE_ALC300,
+       ALC269_TYPE_ALC623,
        ALC269_TYPE_ALC700,
 };
 
@@ -2954,6 +2959,7 @@ static int alc269_parse_auto_config(struct hda_codec *codec)
        case ALC269_TYPE_ALC225:
        case ALC269_TYPE_ALC294:
        case ALC269_TYPE_ALC300:
+       case ALC269_TYPE_ALC623:
        case ALC269_TYPE_ALC700:
                ssids = alc269_ssids;
                break;
@@ -5358,6 +5364,17 @@ static void alc271_hp_gate_mic_jack(struct hda_codec *codec,
        }
 }
 
+static void alc256_fixup_dell_xps_13_headphone_noise2(struct hda_codec *codec,
+                                                     const struct hda_fixup *fix,
+                                                     int action)
+{
+       if (action != HDA_FIXUP_ACT_PRE_PROBE)
+               return;
+
+       snd_hda_codec_amp_stereo(codec, 0x1a, HDA_INPUT, 0, HDA_AMP_VOLMASK, 1);
+       snd_hda_override_wcaps(codec, 0x1a, get_wcaps(codec, 0x1a) & ~AC_WCAP_IN_AMP);
+}
+
 static void alc269_fixup_limit_int_mic_boost(struct hda_codec *codec,
                                             const struct hda_fixup *fix,
                                             int action)
@@ -5822,6 +5839,7 @@ enum {
        ALC298_FIXUP_DELL_AIO_MIC_NO_PRESENCE,
        ALC275_FIXUP_DELL_XPS,
        ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE,
+       ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE2,
        ALC293_FIXUP_LENOVO_SPK_NOISE,
        ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY,
        ALC255_FIXUP_DELL_SPK_NOISE,
@@ -5869,6 +5887,7 @@ enum {
        ALC225_FIXUP_WYSE_AUTO_MUTE,
        ALC225_FIXUP_WYSE_DISABLE_MIC_VREF,
        ALC286_FIXUP_ACER_AIO_HEADSET_MIC,
+       ALC256_FIXUP_ASUS_HEADSET_MIC,
        ALC256_FIXUP_ASUS_MIC_NO_PRESENCE,
        ALC299_FIXUP_PREDATOR_SPK,
        ALC294_FIXUP_ASUS_INTSPK_HEADSET_MIC,
@@ -6558,6 +6577,12 @@ static const struct hda_fixup alc269_fixups[] = {
                .chained = true,
                .chain_id = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE
        },
+       [ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE2] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = alc256_fixup_dell_xps_13_headphone_noise2,
+               .chained = true,
+               .chain_id = ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE
+       },
        [ALC293_FIXUP_LENOVO_SPK_NOISE] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = alc_fixup_disable_aamix,
@@ -6912,6 +6937,15 @@ static const struct hda_fixup alc269_fixups[] = {
                .chained = true,
                .chain_id = ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE
        },
+       [ALC256_FIXUP_ASUS_HEADSET_MIC] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = (const struct hda_pintbl[]) {
+                       { 0x19, 0x03a11020 }, /* headset mic with jack detect */
+                       { }
+               },
+               .chained = true,
+               .chain_id = ALC256_FIXUP_ASUS_HEADSET_MODE
+       },
        [ALC256_FIXUP_ASUS_MIC_NO_PRESENCE] = {
                .type = HDA_FIXUP_PINS,
                .v.pins = (const struct hda_pintbl[]) {
@@ -7001,17 +7035,17 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1028, 0x06de, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK),
        SND_PCI_QUIRK(0x1028, 0x06df, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK),
        SND_PCI_QUIRK(0x1028, 0x06e0, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK),
-       SND_PCI_QUIRK(0x1028, 0x0704, "Dell XPS 13 9350", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
+       SND_PCI_QUIRK(0x1028, 0x0704, "Dell XPS 13 9350", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE2),
        SND_PCI_QUIRK(0x1028, 0x0706, "Dell Inspiron 7559", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER),
        SND_PCI_QUIRK(0x1028, 0x0725, "Dell Inspiron 3162", ALC255_FIXUP_DELL_SPK_NOISE),
        SND_PCI_QUIRK(0x1028, 0x0738, "Dell Precision 5820", ALC269_FIXUP_NO_SHUTUP),
-       SND_PCI_QUIRK(0x1028, 0x075b, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
+       SND_PCI_QUIRK(0x1028, 0x075b, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE2),
        SND_PCI_QUIRK(0x1028, 0x075c, "Dell XPS 27 7760", ALC298_FIXUP_SPK_VOLUME),
        SND_PCI_QUIRK(0x1028, 0x075d, "Dell AIO", ALC298_FIXUP_SPK_VOLUME),
        SND_PCI_QUIRK(0x1028, 0x07b0, "Dell Precision 7520", ALC295_FIXUP_DISABLE_DAC3),
        SND_PCI_QUIRK(0x1028, 0x0798, "Dell Inspiron 17 7000 Gaming", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER),
        SND_PCI_QUIRK(0x1028, 0x080c, "Dell WYSE", ALC225_FIXUP_DELL_WYSE_MIC_NO_PRESENCE),
-       SND_PCI_QUIRK(0x1028, 0x082a, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
+       SND_PCI_QUIRK(0x1028, 0x082a, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE2),
        SND_PCI_QUIRK(0x1028, 0x084b, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB),
        SND_PCI_QUIRK(0x1028, 0x084e, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB),
        SND_PCI_QUIRK(0x1028, 0x0871, "Dell Precision 3630", ALC255_FIXUP_DELL_HEADSET_MIC),
@@ -7108,6 +7142,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A),
        SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
        SND_PCI_QUIRK(0x1043, 0x17d1, "ASUS UX431FL", ALC294_FIXUP_ASUS_INTSPK_HEADSET_MIC),
+       SND_PCI_QUIRK(0x1043, 0x18b1, "Asus MJ401TA", ALC256_FIXUP_ASUS_HEADSET_MIC),
        SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW),
        SND_PCI_QUIRK(0x1043, 0x1a30, "ASUS X705UD", ALC256_FIXUP_ASUS_MIC),
        SND_PCI_QUIRK(0x1043, 0x1b13, "Asus U41SV", ALC269_FIXUP_INV_DMIC),
@@ -7186,6 +7221,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x17aa, 0x312f, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
        SND_PCI_QUIRK(0x17aa, 0x313c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
        SND_PCI_QUIRK(0x17aa, 0x3151, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
+       SND_PCI_QUIRK(0x17aa, 0x3176, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
+       SND_PCI_QUIRK(0x17aa, 0x3178, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
        SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
        SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
        SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo B50-70", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
@@ -7987,9 +8024,13 @@ static int patch_alc269(struct hda_codec *codec)
                spec->codec_variant = ALC269_TYPE_ALC300;
                spec->gen.mixer_nid = 0; /* no loopback on ALC300 */
                break;
+       case 0x10ec0623:
+               spec->codec_variant = ALC269_TYPE_ALC623;
+               break;
        case 0x10ec0700:
        case 0x10ec0701:
        case 0x10ec0703:
+       case 0x10ec0711:
                spec->codec_variant = ALC269_TYPE_ALC700;
                spec->gen.mixer_nid = 0; /* ALC700 does not have any loopback mixer path */
                alc_update_coef_idx(codec, 0x4a, 1 << 15, 0); /* Combo jack auto trigger control */
@@ -9187,6 +9228,7 @@ static const struct hda_device_id snd_hda_id_realtek[] = {
        HDA_CODEC_ENTRY(0x10ec0298, "ALC298", patch_alc269),
        HDA_CODEC_ENTRY(0x10ec0299, "ALC299", patch_alc269),
        HDA_CODEC_ENTRY(0x10ec0300, "ALC300", patch_alc269),
+       HDA_CODEC_ENTRY(0x10ec0623, "ALC623", patch_alc269),
        HDA_CODEC_REV_ENTRY(0x10ec0861, 0x100340, "ALC660", patch_alc861),
        HDA_CODEC_ENTRY(0x10ec0660, "ALC660-VD", patch_alc861vd),
        HDA_CODEC_ENTRY(0x10ec0861, "ALC861", patch_alc861),
@@ -9204,6 +9246,7 @@ static const struct hda_device_id snd_hda_id_realtek[] = {
        HDA_CODEC_ENTRY(0x10ec0700, "ALC700", patch_alc269),
        HDA_CODEC_ENTRY(0x10ec0701, "ALC701", patch_alc269),
        HDA_CODEC_ENTRY(0x10ec0703, "ALC703", patch_alc269),
+       HDA_CODEC_ENTRY(0x10ec0711, "ALC711", patch_alc269),
        HDA_CODEC_ENTRY(0x10ec0867, "ALC891", patch_alc662),
        HDA_CODEC_ENTRY(0x10ec0880, "ALC880", patch_alc880),
        HDA_CODEC_ENTRY(0x10ec0882, "ALC882", patch_alc882),
index e609abcf3220e71f9c600cec0aebf71067fb2003..eb709d52825965f3c53aa7432e01191569ab3193 100644 (file)
@@ -901,16 +901,20 @@ static void max98373_slot_config(struct i2c_client *i2c,
                max98373->i_slot = value & 0xF;
        else
                max98373->i_slot = 1;
-
-       max98373->reset_gpio = of_get_named_gpio(dev->of_node,
+       if (dev->of_node) {
+               max98373->reset_gpio = of_get_named_gpio(dev->of_node,
                                                "maxim,reset-gpio", 0);
-       if (!gpio_is_valid(max98373->reset_gpio)) {
-               dev_err(dev, "Looking up %s property in node %s failed %d\n",
-                       "maxim,reset-gpio", dev->of_node->full_name,
-                       max98373->reset_gpio);
+               if (!gpio_is_valid(max98373->reset_gpio)) {
+                       dev_err(dev, "Looking up %s property in node %s failed %d\n",
+                               "maxim,reset-gpio", dev->of_node->full_name,
+                               max98373->reset_gpio);
+               } else {
+                       dev_dbg(dev, "maxim,reset-gpio=%d",
+                               max98373->reset_gpio);
+               }
        } else {
-               dev_dbg(dev, "maxim,reset-gpio=%d",
-                       max98373->reset_gpio);
+               /* this marks reset_gpio as invalid */
+               max98373->reset_gpio = -1;
        }
 
        if (!device_property_read_u32(dev, "maxim,spkfb-slot-no", &value))
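The max98373 hunk only performs the of_get_named_gpio() lookup when dev->of_node exists and otherwise parks reset_gpio at -1, which gpio_is_valid() treats as invalid, so non-DT (ACPI) probes no longer hit the error path. A small sketch of that guard, with struct dev_lite and the helpers invented for the example:

#include <stdio.h>

struct dev_lite { const void *of_node; };	/* NULL when probed without DT */

/* Pretend DT lookup that only makes sense when a node exists. */
static int of_get_reset_gpio(const void *node)
{
	(void)node;
	return 42;				/* property resolved to GPIO 42 */
}

static int probe_reset_gpio(const struct dev_lite *dev)
{
	if (dev->of_node)
		return of_get_reset_gpio(dev->of_node);
	return -1;				/* negative = invalid, never used */
}

int main(void)
{
	struct dev_lite acpi_dev = { .of_node = NULL };
	struct dev_lite dt_dev = { .of_node = "node" };

	printf("ACPI probe gpio=%d, DT probe gpio=%d\n",
	       probe_reset_gpio(&acpi_dev), probe_reset_gpio(&dt_dev));
	return 0;
}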
index 9fa5d44fdc79f57a3b274393dea4cd7f08a4b2a5..58b2468fb2a7139b8d7dbb35085ecbe5acb8215f 100644 (file)
@@ -243,6 +243,10 @@ static const char *const rx_mix1_text[] = {
        "ZERO", "IIR1", "IIR2", "RX1", "RX2", "RX3"
 };
 
+static const char * const rx_mix2_text[] = {
+       "ZERO", "IIR1", "IIR2"
+};
+
 static const char *const dec_mux_text[] = {
        "ZERO", "ADC1", "ADC2", "ADC3", "DMIC1", "DMIC2"
 };
@@ -270,6 +274,16 @@ static const struct soc_enum rx3_mix1_inp_enum[] = {
        SOC_ENUM_SINGLE(LPASS_CDC_CONN_RX3_B2_CTL, 0, 6, rx_mix1_text),
 };
 
+/* RX1 MIX2 */
+static const struct soc_enum rx_mix2_inp1_chain_enum =
+       SOC_ENUM_SINGLE(LPASS_CDC_CONN_RX1_B3_CTL,
+               0, 3, rx_mix2_text);
+
+/* RX2 MIX2 */
+static const struct soc_enum rx2_mix2_inp1_chain_enum =
+       SOC_ENUM_SINGLE(LPASS_CDC_CONN_RX2_B3_CTL,
+               0, 3, rx_mix2_text);
+
 /* DEC */
 static const struct soc_enum dec1_mux_enum = SOC_ENUM_SINGLE(
                                LPASS_CDC_CONN_TX_B1_CTL, 0, 6, dec_mux_text);
@@ -309,6 +323,10 @@ static const struct snd_kcontrol_new rx3_mix1_inp2_mux = SOC_DAPM_ENUM(
                                "RX3 MIX1 INP2 Mux", rx3_mix1_inp_enum[1]);
 static const struct snd_kcontrol_new rx3_mix1_inp3_mux = SOC_DAPM_ENUM(
                                "RX3 MIX1 INP3 Mux", rx3_mix1_inp_enum[2]);
+static const struct snd_kcontrol_new rx1_mix2_inp1_mux = SOC_DAPM_ENUM(
+                               "RX1 MIX2 INP1 Mux", rx_mix2_inp1_chain_enum);
+static const struct snd_kcontrol_new rx2_mix2_inp1_mux = SOC_DAPM_ENUM(
+                               "RX2 MIX2 INP1 Mux", rx2_mix2_inp1_chain_enum);
 
 /* Digital Gain control -38.4 dB to +38.4 dB in 0.3 dB steps */
 static const DECLARE_TLV_DB_SCALE(digital_gain, -3840, 30, 0);
@@ -740,6 +758,10 @@ static const struct snd_soc_dapm_widget msm8916_wcd_digital_dapm_widgets[] = {
                         &rx3_mix1_inp2_mux),
        SND_SOC_DAPM_MUX("RX3 MIX1 INP3", SND_SOC_NOPM, 0, 0,
                         &rx3_mix1_inp3_mux),
+       SND_SOC_DAPM_MUX("RX1 MIX2 INP1", SND_SOC_NOPM, 0, 0,
+                        &rx1_mix2_inp1_mux),
+       SND_SOC_DAPM_MUX("RX2 MIX2 INP1", SND_SOC_NOPM, 0, 0,
+                        &rx2_mix2_inp1_mux),
 
        SND_SOC_DAPM_MUX("CIC1 MUX", SND_SOC_NOPM, 0, 0, &cic1_mux),
        SND_SOC_DAPM_MUX("CIC2 MUX", SND_SOC_NOPM, 0, 0, &cic2_mux),
index 762595de956c15662c245acf825c2cafb212b528..c506c9305043e115711cbac86fb22e77734b0d54 100644 (file)
@@ -1770,6 +1770,9 @@ static int rt5651_detect_headset(struct snd_soc_component *component)
 
 static bool rt5651_support_button_press(struct rt5651_priv *rt5651)
 {
+       if (!rt5651->hp_jack)
+               return false;
+
        /* Button press support only works with internal jack-detection */
        return (rt5651->hp_jack->status & SND_JACK_MICROPHONE) &&
                rt5651->gpiod_hp_det == NULL;
index 1ef470700ed5fea954321884d705fc50ba9ccc23..c50b75ce82e0b727cd91fb0d17caf93088890da1 100644 (file)
@@ -995,6 +995,16 @@ static int rt5682_set_jack_detect(struct snd_soc_component *component,
 {
        struct rt5682_priv *rt5682 = snd_soc_component_get_drvdata(component);
 
+       rt5682->hs_jack = hs_jack;
+
+       if (!hs_jack) {
+               regmap_update_bits(rt5682->regmap, RT5682_IRQ_CTRL_2,
+                                  RT5682_JD1_EN_MASK, RT5682_JD1_DIS);
+               regmap_update_bits(rt5682->regmap, RT5682_RC_CLK_CTRL,
+                                  RT5682_POW_JDH | RT5682_POW_JDL, 0);
+               return 0;
+       }
+
        switch (rt5682->pdata.jd_src) {
        case RT5682_JD1:
                snd_soc_component_update_bits(component, RT5682_CBJ_CTRL_2,
@@ -1032,8 +1042,6 @@ static int rt5682_set_jack_detect(struct snd_soc_component *component,
                break;
        }
 
-       rt5682->hs_jack = hs_jack;
-
        return 0;
 }
 
index c3d06e8bc54f20293f6490a56669e920eeae3a29..d5fb7f5dd551cb834ad77ae9b99ca453d7264008 100644 (file)
@@ -533,13 +533,10 @@ static SOC_ENUM_SINGLE_DECL(dac_osr,
 static SOC_ENUM_SINGLE_DECL(adc_osr,
                            WM8994_OVERSAMPLING, 1, osr_text);
 
-static const struct snd_kcontrol_new wm8994_snd_controls[] = {
+static const struct snd_kcontrol_new wm8994_common_snd_controls[] = {
 SOC_DOUBLE_R_TLV("AIF1ADC1 Volume", WM8994_AIF1_ADC1_LEFT_VOLUME,
                 WM8994_AIF1_ADC1_RIGHT_VOLUME,
                 1, 119, 0, digital_tlv),
-SOC_DOUBLE_R_TLV("AIF1ADC2 Volume", WM8994_AIF1_ADC2_LEFT_VOLUME,
-                WM8994_AIF1_ADC2_RIGHT_VOLUME,
-                1, 119, 0, digital_tlv),
 SOC_DOUBLE_R_TLV("AIF2ADC Volume", WM8994_AIF2_ADC_LEFT_VOLUME,
                 WM8994_AIF2_ADC_RIGHT_VOLUME,
                 1, 119, 0, digital_tlv),
@@ -556,8 +553,6 @@ SOC_ENUM("AIF2DACR Source", aif2dacr_src),
 
 SOC_DOUBLE_R_TLV("AIF1DAC1 Volume", WM8994_AIF1_DAC1_LEFT_VOLUME,
                 WM8994_AIF1_DAC1_RIGHT_VOLUME, 1, 96, 0, digital_tlv),
-SOC_DOUBLE_R_TLV("AIF1DAC2 Volume", WM8994_AIF1_DAC2_LEFT_VOLUME,
-                WM8994_AIF1_DAC2_RIGHT_VOLUME, 1, 96, 0, digital_tlv),
 SOC_DOUBLE_R_TLV("AIF2DAC Volume", WM8994_AIF2_DAC_LEFT_VOLUME,
                 WM8994_AIF2_DAC_RIGHT_VOLUME, 1, 96, 0, digital_tlv),
 
@@ -565,17 +560,12 @@ SOC_SINGLE_TLV("AIF1 Boost Volume", WM8994_AIF1_CONTROL_2, 10, 3, 0, aif_tlv),
 SOC_SINGLE_TLV("AIF2 Boost Volume", WM8994_AIF2_CONTROL_2, 10, 3, 0, aif_tlv),
 
 SOC_SINGLE("AIF1DAC1 EQ Switch", WM8994_AIF1_DAC1_EQ_GAINS_1, 0, 1, 0),
-SOC_SINGLE("AIF1DAC2 EQ Switch", WM8994_AIF1_DAC2_EQ_GAINS_1, 0, 1, 0),
 SOC_SINGLE("AIF2 EQ Switch", WM8994_AIF2_EQ_GAINS_1, 0, 1, 0),
 
 WM8994_DRC_SWITCH("AIF1DAC1 DRC Switch", WM8994_AIF1_DRC1_1, 2),
 WM8994_DRC_SWITCH("AIF1ADC1L DRC Switch", WM8994_AIF1_DRC1_1, 1),
 WM8994_DRC_SWITCH("AIF1ADC1R DRC Switch", WM8994_AIF1_DRC1_1, 0),
 
-WM8994_DRC_SWITCH("AIF1DAC2 DRC Switch", WM8994_AIF1_DRC2_1, 2),
-WM8994_DRC_SWITCH("AIF1ADC2L DRC Switch", WM8994_AIF1_DRC2_1, 1),
-WM8994_DRC_SWITCH("AIF1ADC2R DRC Switch", WM8994_AIF1_DRC2_1, 0),
-
 WM8994_DRC_SWITCH("AIF2DAC DRC Switch", WM8994_AIF2_DRC_1, 2),
 WM8994_DRC_SWITCH("AIF2ADCL DRC Switch", WM8994_AIF2_DRC_1, 1),
 WM8994_DRC_SWITCH("AIF2ADCR DRC Switch", WM8994_AIF2_DRC_1, 0),
@@ -594,9 +584,6 @@ SOC_SINGLE("Sidetone HPF Switch", WM8994_SIDETONE, 6, 1, 0),
 SOC_ENUM("AIF1ADC1 HPF Mode", aif1adc1_hpf),
 SOC_DOUBLE("AIF1ADC1 HPF Switch", WM8994_AIF1_ADC1_FILTERS, 12, 11, 1, 0),
 
-SOC_ENUM("AIF1ADC2 HPF Mode", aif1adc2_hpf),
-SOC_DOUBLE("AIF1ADC2 HPF Switch", WM8994_AIF1_ADC2_FILTERS, 12, 11, 1, 0),
-
 SOC_ENUM("AIF2ADC HPF Mode", aif2adc_hpf),
 SOC_DOUBLE("AIF2ADC HPF Switch", WM8994_AIF2_ADC_FILTERS, 12, 11, 1, 0),
 
@@ -637,6 +624,24 @@ SOC_SINGLE("AIF2DAC 3D Stereo Switch", WM8994_AIF2_DAC_FILTERS_2,
           8, 1, 0),
 };
 
+/* Controls not available on WM1811 */
+static const struct snd_kcontrol_new wm8994_snd_controls[] = {
+SOC_DOUBLE_R_TLV("AIF1ADC2 Volume", WM8994_AIF1_ADC2_LEFT_VOLUME,
+                WM8994_AIF1_ADC2_RIGHT_VOLUME,
+                1, 119, 0, digital_tlv),
+SOC_DOUBLE_R_TLV("AIF1DAC2 Volume", WM8994_AIF1_DAC2_LEFT_VOLUME,
+                WM8994_AIF1_DAC2_RIGHT_VOLUME, 1, 96, 0, digital_tlv),
+
+SOC_SINGLE("AIF1DAC2 EQ Switch", WM8994_AIF1_DAC2_EQ_GAINS_1, 0, 1, 0),
+
+WM8994_DRC_SWITCH("AIF1DAC2 DRC Switch", WM8994_AIF1_DRC2_1, 2),
+WM8994_DRC_SWITCH("AIF1ADC2L DRC Switch", WM8994_AIF1_DRC2_1, 1),
+WM8994_DRC_SWITCH("AIF1ADC2R DRC Switch", WM8994_AIF1_DRC2_1, 0),
+
+SOC_ENUM("AIF1ADC2 HPF Mode", aif1adc2_hpf),
+SOC_DOUBLE("AIF1ADC2 HPF Switch", WM8994_AIF1_ADC2_FILTERS, 12, 11, 1, 0),
+};
+
 static const struct snd_kcontrol_new wm8994_eq_controls[] = {
 SOC_SINGLE_TLV("AIF1DAC1 EQ1 Volume", WM8994_AIF1_DAC1_EQ_GAINS_1, 11, 31, 0,
               eq_tlv),
@@ -4258,13 +4263,15 @@ static int wm8994_component_probe(struct snd_soc_component *component)
        wm8994_handle_pdata(wm8994);
 
        wm_hubs_add_analogue_controls(component);
-       snd_soc_add_component_controls(component, wm8994_snd_controls,
-                            ARRAY_SIZE(wm8994_snd_controls));
+       snd_soc_add_component_controls(component, wm8994_common_snd_controls,
+                                      ARRAY_SIZE(wm8994_common_snd_controls));
        snd_soc_dapm_new_controls(dapm, wm8994_dapm_widgets,
                                  ARRAY_SIZE(wm8994_dapm_widgets));
 
        switch (control->type) {
        case WM8994:
+               snd_soc_add_component_controls(component, wm8994_snd_controls,
+                                              ARRAY_SIZE(wm8994_snd_controls));
                snd_soc_dapm_new_controls(dapm, wm8994_specific_dapm_widgets,
                                          ARRAY_SIZE(wm8994_specific_dapm_widgets));
                if (control->revision < 4) {
@@ -4284,8 +4291,10 @@ static int wm8994_component_probe(struct snd_soc_component *component)
                }
                break;
        case WM8958:
+               snd_soc_add_component_controls(component, wm8994_snd_controls,
+                                              ARRAY_SIZE(wm8994_snd_controls));
                snd_soc_add_component_controls(component, wm8958_snd_controls,
-                                    ARRAY_SIZE(wm8958_snd_controls));
+                                              ARRAY_SIZE(wm8958_snd_controls));
                snd_soc_dapm_new_controls(dapm, wm8958_dapm_widgets,
                                          ARRAY_SIZE(wm8958_dapm_widgets));
                if (control->revision < 1) {
index ae28d9907c30f58f2bd5a1ee8588b8b03fb9c399..9b8bb7bbe945daa4ad22db1264b44b0ac7f3a0f9 100644 (file)
@@ -1259,8 +1259,7 @@ static unsigned int wmfw_convert_flags(unsigned int in, unsigned int len)
        }
 
        if (in) {
-               if (in & WMFW_CTL_FLAG_READABLE)
-                       out |= rd;
+               out |= rd;
                if (in & WMFW_CTL_FLAG_WRITEABLE)
                        out |= wr;
                if (in & WMFW_CTL_FLAG_VOLATILE)
@@ -3697,11 +3696,16 @@ static int wm_adsp_buffer_parse_legacy(struct wm_adsp *dsp)
        u32 xmalg, addr, magic;
        int i, ret;
 
+       alg_region = wm_adsp_find_alg_region(dsp, WMFW_ADSP2_XM, dsp->fw_id);
+       if (!alg_region) {
+               adsp_err(dsp, "No algorithm region found\n");
+               return -EINVAL;
+       }
+
        buf = wm_adsp_buffer_alloc(dsp);
        if (!buf)
                return -ENOMEM;
 
-       alg_region = wm_adsp_find_alg_region(dsp, WMFW_ADSP2_XM, dsp->fw_id);
        xmalg = dsp->ops->sys_config_size / sizeof(__be32);
 
        addr = alg_region->base + xmalg + ALG_XM_FIELD(magic);
index a437567b8cee233a0dbebd52dfe1c70fe952dcce..4f6e58c3954a220fd0050c5d8c29089c0ee16622 100644 (file)
@@ -308,6 +308,9 @@ static const struct snd_soc_dapm_widget sof_widgets[] = {
        SND_SOC_DAPM_HP("Headphone Jack", NULL),
        SND_SOC_DAPM_MIC("Headset Mic", NULL),
        SND_SOC_DAPM_SPK("Spk", NULL),
+};
+
+static const struct snd_soc_dapm_widget dmic_widgets[] = {
        SND_SOC_DAPM_MIC("SoC DMIC", NULL),
 };
 
@@ -318,10 +321,6 @@ static const struct snd_soc_dapm_route sof_map[] = {
 
        /* other jacks */
        { "IN1P", NULL, "Headset Mic" },
-
-       /* digital mics */
-       {"DMic", NULL, "SoC DMIC"},
-
 };
 
 static const struct snd_soc_dapm_route speaker_map[] = {
@@ -329,6 +328,11 @@ static const struct snd_soc_dapm_route speaker_map[] = {
        { "Spk", NULL, "Speaker" },
 };
 
+static const struct snd_soc_dapm_route dmic_map[] = {
+       /* digital mics */
+       {"DMic", NULL, "SoC DMIC"},
+};
+
 static int speaker_codec_init(struct snd_soc_pcm_runtime *rtd)
 {
        struct snd_soc_card *card = rtd->card;
@@ -342,6 +346,28 @@ static int speaker_codec_init(struct snd_soc_pcm_runtime *rtd)
        return ret;
 }
 
+static int dmic_init(struct snd_soc_pcm_runtime *rtd)
+{
+       struct snd_soc_card *card = rtd->card;
+       int ret;
+
+       ret = snd_soc_dapm_new_controls(&card->dapm, dmic_widgets,
+                                       ARRAY_SIZE(dmic_widgets));
+       if (ret) {
+               dev_err(card->dev, "DMic widget addition failed: %d\n", ret);
+               /* Don't need to add routes if widget addition failed */
+               return ret;
+       }
+
+       ret = snd_soc_dapm_add_routes(&card->dapm, dmic_map,
+                                     ARRAY_SIZE(dmic_map));
+
+       if (ret)
+               dev_err(card->dev, "DMic map addition failed: %d\n", ret);
+
+       return ret;
+}
+
 /* sof audio machine driver for rt5682 codec */
 static struct snd_soc_card sof_audio_card_rt5682 = {
        .name = "sof_rt5682",
@@ -445,6 +471,7 @@ static struct snd_soc_dai_link *sof_card_dai_links_create(struct device *dev,
                links[id].name = "dmic01";
                links[id].cpus = &cpus[id];
                links[id].cpus->dai_name = "DMIC01 Pin";
+               links[id].init = dmic_init;
                if (dmic_be_num > 1) {
                        /* set up 2 BE links at most */
                        links[id + 1].name = "dmic16k";
@@ -576,6 +603,15 @@ static int sof_audio_probe(struct platform_device *pdev)
        /* need to get main clock from pmc */
        if (sof_rt5682_quirk & SOF_RT5682_MCLK_BYTCHT_EN) {
                ctx->mclk = devm_clk_get(&pdev->dev, "pmc_plt_clk_3");
+               if (IS_ERR(ctx->mclk)) {
+                       ret = PTR_ERR(ctx->mclk);
+
+                       dev_err(&pdev->dev,
+                               "Failed to get MCLK from pmc_plt_clk_3: %d\n",
+                               ret);
+                       return ret;
+               }
+
                ret = clk_prepare_enable(ctx->mclk);
                if (ret < 0) {
                        dev_err(&pdev->dev,
@@ -621,8 +657,24 @@ static int sof_audio_probe(struct platform_device *pdev)
                                          &sof_audio_card_rt5682);
 }
 
+static int sof_rt5682_remove(struct platform_device *pdev)
+{
+       struct snd_soc_card *card = platform_get_drvdata(pdev);
+       struct snd_soc_component *component = NULL;
+
+       for_each_card_components(card, component) {
+               if (!strcmp(component->name, rt5682_component[0].name)) {
+                       snd_soc_component_set_jack(component, NULL, NULL);
+                       break;
+               }
+       }
+
+       return 0;
+}
+
 static struct platform_driver sof_audio = {
        .probe = sof_audio_probe,
+       .remove = sof_rt5682_remove,
        .driver = {
                .name = "sof_rt5682",
                .pm = &snd_soc_pm_ops,
index af2d5a6124c8e122a8055d482b376d8df8be76c3..61c984f10d8e61199b7c02e287532039c6f3d9ed 100644 (file)
@@ -677,7 +677,7 @@ static int rockchip_i2s_probe(struct platform_device *pdev)
        ret = rockchip_pcm_platform_register(&pdev->dev);
        if (ret) {
                dev_err(&pdev->dev, "Could not register PCM\n");
-               return ret;
+               goto err_suspend;
        }
 
        return 0;
index c213913eb98488f709c097e6d91b9f298bba5399..fd8c6642fb0dfbceeeb8a9170461287643667289 100644 (file)
@@ -5,6 +5,7 @@
 //  Author: Claude <claude@insginal.co.kr>
 
 #include <linux/module.h>
+#include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/clk.h>
 
@@ -74,6 +75,17 @@ static struct snd_soc_card arndale_rt5631 = {
        .num_links = ARRAY_SIZE(arndale_rt5631_dai),
 };
 
+static void arndale_put_of_nodes(struct snd_soc_card *card)
+{
+       struct snd_soc_dai_link *dai_link;
+       int i;
+
+       for_each_card_prelinks(card, i, dai_link) {
+               of_node_put(dai_link->cpus->of_node);
+               of_node_put(dai_link->codecs->of_node);
+       }
+}
+
 static int arndale_audio_probe(struct platform_device *pdev)
 {
        int n, ret;
@@ -103,18 +115,31 @@ static int arndale_audio_probe(struct platform_device *pdev)
                if (!arndale_rt5631_dai[0].codecs->of_node) {
                        dev_err(&pdev->dev,
                        "Property 'samsung,audio-codec' missing or invalid\n");
-                       return -EINVAL;
+                       ret = -EINVAL;
+                       goto err_put_of_nodes;
                }
        }
 
        ret = devm_snd_soc_register_card(card->dev, card);
+       if (ret) {
+               dev_err(&pdev->dev, "snd_soc_register_card() failed: %d\n", ret);
+               goto err_put_of_nodes;
+       }
+       return 0;
 
-       if (ret)
-               dev_err(&pdev->dev, "snd_soc_register_card() failed:%d\n", ret);
-
+err_put_of_nodes:
+       arndale_put_of_nodes(card);
        return ret;
 }
 
+static int arndale_audio_remove(struct platform_device *pdev)
+{
+       struct snd_soc_card *card = platform_get_drvdata(pdev);
+
+       arndale_put_of_nodes(card);
+       return 0;
+}
+
 static const struct of_device_id samsung_arndale_rt5631_of_match[] __maybe_unused = {
        { .compatible = "samsung,arndale-rt5631", },
        { .compatible = "samsung,arndale-alc5631", },
@@ -129,6 +154,7 @@ static struct platform_driver arndale_audio_driver = {
                .of_match_table = of_match_ptr(samsung_arndale_rt5631_of_match),
        },
        .probe = arndale_audio_probe,
+       .remove = arndale_audio_remove,
 };
 
 module_platform_driver(arndale_audio_driver);
index bda5b958d0dc5c17b1681212fe5cb96377fd2b31..e9596c2096cd585334e6c99710cc6e3194a4f792 100644 (file)
@@ -761,6 +761,7 @@ static int rsnd_soc_dai_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
        }
 
        /* set format */
+       rdai->bit_clk_inv = 0;
        switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
        case SND_SOC_DAIFMT_I2S:
                rdai->sys_delay = 0;
index e163dde5eab1d708a98b0767318811fa4f2491d8..b600d3eaaf5cd2c2c97c1a42e3d091f75970001a 100644 (file)
@@ -1070,7 +1070,7 @@ static int soc_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
                        return ret;
        }
 
-       snd_soc_dai_trigger(cpu_dai, substream, cmd);
+       ret = snd_soc_dai_trigger(cpu_dai, substream, cmd);
        if (ret < 0)
                return ret;
 
@@ -1097,7 +1097,7 @@ static int soc_pcm_bespoke_trigger(struct snd_pcm_substream *substream,
                        return ret;
        }
 
-       snd_soc_dai_bespoke_trigger(cpu_dai, substream, cmd);
+       ret = snd_soc_dai_bespoke_trigger(cpu_dai, substream, cmd);
        if (ret < 0)
                return ret;
 
@@ -1146,6 +1146,7 @@ static int dpcm_be_connect(struct snd_soc_pcm_runtime *fe,
 {
        struct snd_soc_dpcm *dpcm;
        unsigned long flags;
+       char *name;
 
        /* only add new dpcms */
        for_each_dpcm_be(fe, stream, dpcm) {
@@ -1171,9 +1172,15 @@ static int dpcm_be_connect(struct snd_soc_pcm_runtime *fe,
                        stream ? "<-" : "->", be->dai_link->name);
 
 #ifdef CONFIG_DEBUG_FS
-       dpcm->debugfs_state = debugfs_create_dir(be->dai_link->name,
-                                                fe->debugfs_dpcm_root);
-       debugfs_create_u32("state", 0644, dpcm->debugfs_state, &dpcm->state);
+       name = kasprintf(GFP_KERNEL, "%s:%s", be->dai_link->name,
+                        stream ? "capture" : "playback");
+       if (name) {
+               dpcm->debugfs_state = debugfs_create_dir(name,
+                                                        fe->debugfs_dpcm_root);
+               debugfs_create_u32("state", 0644, dpcm->debugfs_state,
+                                  &dpcm->state);
+               kfree(name);
+       }
 #endif
        return 1;
 }
index aa9a1fca46fa7832271ff0d3919959f94dfaa0a4..0fd032914a318b39d46f89ac8af1435de11ebba1 100644 (file)
@@ -1582,7 +1582,7 @@ static int soc_tplg_dapm_widget_create(struct soc_tplg *tplg,
 
        /* map user to kernel widget ID */
        template.id = get_widget_id(le32_to_cpu(w->id));
-       if (template.id < 0)
+       if ((int)template.id < 0)
                return template.id;
 
        /* strings are allocated here, but used and freed by the widget */
index a4983f90ff5b31694197b2d72c6e1cdf309e8fb9..2b8711eda362b97362ef2501080c099b7616bc52 100644 (file)
@@ -60,13 +60,16 @@ int snd_sof_volume_put(struct snd_kcontrol *kcontrol,
        struct snd_sof_dev *sdev = scontrol->sdev;
        struct sof_ipc_ctrl_data *cdata = scontrol->control_data;
        unsigned int i, channels = scontrol->num_channels;
+       bool change = false;
+       u32 value;
 
        /* update each channel */
        for (i = 0; i < channels; i++) {
-               cdata->chanv[i].value =
-                       mixer_to_ipc(ucontrol->value.integer.value[i],
+               value = mixer_to_ipc(ucontrol->value.integer.value[i],
                                     scontrol->volume_table, sm->max + 1);
+               change = change || (value != cdata->chanv[i].value);
                cdata->chanv[i].channel = i;
+               cdata->chanv[i].value = value;
        }
 
        /* notify DSP of mixer updates */
@@ -76,8 +79,7 @@ int snd_sof_volume_put(struct snd_kcontrol *kcontrol,
                                              SOF_CTRL_TYPE_VALUE_CHAN_GET,
                                              SOF_CTRL_CMD_VOLUME,
                                              true);
-
-       return 0;
+       return change;
 }
 
 int snd_sof_switch_get(struct snd_kcontrol *kcontrol,
@@ -105,11 +107,15 @@ int snd_sof_switch_put(struct snd_kcontrol *kcontrol,
        struct snd_sof_dev *sdev = scontrol->sdev;
        struct sof_ipc_ctrl_data *cdata = scontrol->control_data;
        unsigned int i, channels = scontrol->num_channels;
+       bool change = false;
+       u32 value;
 
        /* update each channel */
        for (i = 0; i < channels; i++) {
-               cdata->chanv[i].value = ucontrol->value.integer.value[i];
+               value = ucontrol->value.integer.value[i];
+               change = change || (value != cdata->chanv[i].value);
                cdata->chanv[i].channel = i;
+               cdata->chanv[i].value = value;
        }
 
        /* notify DSP of mixer updates */
@@ -120,7 +126,7 @@ int snd_sof_switch_put(struct snd_kcontrol *kcontrol,
                                              SOF_CTRL_CMD_SWITCH,
                                              true);
 
-       return 0;
+       return change;
 }
 
 int snd_sof_enum_get(struct snd_kcontrol *kcontrol,
@@ -148,11 +154,15 @@ int snd_sof_enum_put(struct snd_kcontrol *kcontrol,
        struct snd_sof_dev *sdev = scontrol->sdev;
        struct sof_ipc_ctrl_data *cdata = scontrol->control_data;
        unsigned int i, channels = scontrol->num_channels;
+       bool change = false;
+       u32 value;
 
        /* update each channel */
        for (i = 0; i < channels; i++) {
-               cdata->chanv[i].value = ucontrol->value.enumerated.item[i];
+               value = ucontrol->value.enumerated.item[i];
+               change = change || (value != cdata->chanv[i].value);
                cdata->chanv[i].channel = i;
+               cdata->chanv[i].value = value;
        }
 
        /* notify DSP of enum updates */
@@ -163,7 +173,7 @@ int snd_sof_enum_put(struct snd_kcontrol *kcontrol,
                                              SOF_CTRL_CMD_ENUM,
                                              true);
 
-       return 0;
+       return change;
 }
 
 int snd_sof_bytes_get(struct snd_kcontrol *kcontrol,
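The three hunks above make the SOF volume, switch and enum put handlers track whether any channel value actually changed and return that result. ALSA expects a kcontrol .put callback to return 1 when the value changed, so a value-change notification reaches userspace, and 0 when nothing changed. A minimal sketch of the same pattern for a generic multi-channel control follows; the struct and its fields are hypothetical and are not the SOF implementation:

    #include <sound/core.h>
    #include <sound/control.h>

    /* Hypothetical driver state, for illustration only. */
    struct example_ctl {
            unsigned int channels;
            u32 values[8];
    };

    static int example_ctl_put(struct snd_kcontrol *kcontrol,
                               struct snd_ctl_elem_value *ucontrol)
    {
            struct example_ctl *ctl = (struct example_ctl *)kcontrol->private_value;
            bool change = false;
            unsigned int i;

            for (i = 0; i < ctl->channels; i++) {
                    u32 value = ucontrol->value.integer.value[i];

                    change = change || (value != ctl->values[i]);
                    ctl->values[i] = value;
            }

            /* Returning 1 makes ALSA emit a value-change event; 0 suppresses it. */
            return change;
    }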
index 479ba249e219af3ac4acbcd4f52622e51539d601..d62f51d33be148531916c887e6aa046328f5c2dc 100644 (file)
@@ -273,6 +273,16 @@ config SND_SOC_SOF_HDA_AUDIO_CODEC
          Say Y if you want to enable HDAudio codecs with SOF.
          If unsure select "N".
 
+config SND_SOC_SOF_HDA_ALWAYS_ENABLE_DMI_L1
+       bool "SOF enable DMI Link L1"
+       help
+         This option enables DMI L1 for both playback and capture
+         and disables known workarounds for specific HDAudio platforms.
+         Only use to look into power optimizations on platforms not
+         affected by DMI L1 issues. This option is not recommended.
+         Say Y if you want to enable DMI Link L1.
+         If unsure, select "N".
+
 endif ## SND_SOC_SOF_HDA_COMMON
 
 config SND_SOC_SOF_HDA_LINK_BASELINE
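The new option is a plain bool, so callers elsewhere in the series can gate the L1 workaround at compile time with IS_ENABLED() rather than #ifdef blocks. A rough illustration of that pattern with a made-up CONFIG_EXAMPLE_FEATURE symbol (not a real Kconfig option):

    #include <linux/kconfig.h>
    #include <linux/printk.h>

    static void example_apply_workaround(void)
    {
            /*
             * IS_ENABLED(CONFIG_FOO) is 1 when the option is built in or
             * modular and 0 otherwise, so the dead branch is dropped at
             * compile time while still being parsed and type-checked.
             */
            if (!IS_ENABLED(CONFIG_EXAMPLE_FEATURE))
                    pr_debug("workaround left in place\n");
    }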
index e282179263e8346f18a24d36389c2a71c8db9f47..80e2826fb447b28524268ccbf598f5f73e75abd7 100644 (file)
@@ -37,6 +37,7 @@
 #define MBOX_SIZE       0x1000
 #define MBOX_DUMP_SIZE 0x30
 #define EXCEPT_OFFSET  0x800
+#define EXCEPT_MAX_HDR_SIZE    0x400
 
 /* DSP peripherals */
 #define DMAC0_OFFSET    0xFE000
@@ -228,6 +229,11 @@ static void bdw_get_registers(struct snd_sof_dev *sdev,
        /* note: variable AR register array is not read */
 
        /* then get panic info */
+       if (xoops->arch_hdr.totalsize > EXCEPT_MAX_HDR_SIZE) {
+               dev_err(sdev->dev, "invalid header size 0x%x. FW oops is bogus\n",
+                       xoops->arch_hdr.totalsize);
+               return;
+       }
        offset += xoops->arch_hdr.totalsize;
        sof_mailbox_read(sdev, offset, panic_info, sizeof(*panic_info));
 
@@ -451,6 +457,7 @@ static int bdw_probe(struct snd_sof_dev *sdev)
        /* TODO: add offsets */
        sdev->mmio_bar = BDW_DSP_BAR;
        sdev->mailbox_bar = BDW_DSP_BAR;
+       sdev->dsp_oops_offset = MBOX_OFFSET;
 
        /* PCI base */
        mmio = platform_get_resource(pdev, IORESOURCE_MEM,
index 5e7a6aaa627a82490448b9a0ce549eb47b08264e..a1e514f7173952fe899923bd9ffda4009753674f 100644 (file)
@@ -28,6 +28,7 @@
 #define MBOX_OFFSET            0x144000
 #define MBOX_SIZE              0x1000
 #define EXCEPT_OFFSET          0x800
+#define EXCEPT_MAX_HDR_SIZE    0x400
 
 /* DSP peripherals */
 #define DMAC0_OFFSET           0x098000
@@ -126,6 +127,11 @@ static void byt_get_registers(struct snd_sof_dev *sdev,
        /* note: variable AR register array is not read */
 
        /* then get panic info */
+       if (xoops->arch_hdr.totalsize > EXCEPT_MAX_HDR_SIZE) {
+               dev_err(sdev->dev, "invalid header size 0x%x. FW oops is bogus\n",
+                       xoops->arch_hdr.totalsize);
+               return;
+       }
        offset += xoops->arch_hdr.totalsize;
        sof_mailbox_read(sdev, offset, panic_info, sizeof(*panic_info));
 
index bc41028a7a01de3d101412ebec768aeb3aa21d6f..df1909e1d95060b0b4ddba11d0294b490dca4b03 100644 (file)
@@ -139,20 +139,16 @@ void hda_dsp_ctrl_misc_clock_gating(struct snd_sof_dev *sdev, bool enable)
  */
 int hda_dsp_ctrl_clock_power_gating(struct snd_sof_dev *sdev, bool enable)
 {
-#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
-       struct hdac_bus *bus = sof_to_bus(sdev);
-#endif
        u32 val;
 
        /* enable/disable audio dsp clock gating */
        val = enable ? PCI_CGCTL_ADSPDCGE : 0;
        snd_sof_pci_update_bits(sdev, PCI_CGCTL, PCI_CGCTL_ADSPDCGE, val);
 
-#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
-       /* enable/disable L1 support */
-       val = enable ? SOF_HDA_VS_EM2_L1SEN : 0;
-       snd_hdac_chip_updatel(bus, VS_EM2, SOF_HDA_VS_EM2_L1SEN, val);
-#endif
+       /* enable/disable DMI Link L1 support */
+       val = enable ? HDA_VS_INTEL_EM2_L1SEN : 0;
+       snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR, HDA_VS_INTEL_EM2,
+                               HDA_VS_INTEL_EM2_L1SEN, val);
 
        /* enable/disable audio dsp power gating */
        val = enable ? 0 : PCI_PGCTL_ADSPPGD;
index 6427f0b3a2f113b7a7a13e65ce66e3e9c7ac8fc0..65c2af3fcaab71ebc559d8e2a4a1b8e9b173b5d1 100644 (file)
@@ -44,6 +44,7 @@ static int cl_stream_prepare(struct snd_sof_dev *sdev, unsigned int format,
                return -ENODEV;
        }
        hstream = &dsp_stream->hstream;
+       hstream->substream = NULL;
 
        /* allocate DMA buffer */
        ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV_SG, &pci->dev, size, dmab);
index ad8d41f22e92ddcf958cd6ac7f4d13f789d8a5f8..2c744718840254fc30eec8a12dc39ac45d014a14 100644 (file)
@@ -185,6 +185,17 @@ hda_dsp_stream_get(struct snd_sof_dev *sdev, int direction)
                        direction == SNDRV_PCM_STREAM_PLAYBACK ?
                        "playback" : "capture");
 
+       /*
+        * Disable DMI Link L1 entry when capture stream is opened.
+        * Workaround to address a known issue with host DMA that results
+        * in xruns during pause/release in capture scenarios.
+        */
+       if (!IS_ENABLED(SND_SOC_SOF_HDA_ALWAYS_ENABLE_DMI_L1))
+               if (stream && direction == SNDRV_PCM_STREAM_CAPTURE)
+                       snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
+                                               HDA_VS_INTEL_EM2,
+                                               HDA_VS_INTEL_EM2_L1SEN, 0);
+
        return stream;
 }
 
@@ -193,23 +204,43 @@ int hda_dsp_stream_put(struct snd_sof_dev *sdev, int direction, int stream_tag)
 {
        struct hdac_bus *bus = sof_to_bus(sdev);
        struct hdac_stream *s;
+       bool active_capture_stream = false;
+       bool found = false;
 
        spin_lock_irq(&bus->reg_lock);
 
-       /* find used stream */
+       /*
+        * close stream matching the stream tag
+        * and check if there are any open capture streams.
+        */
        list_for_each_entry(s, &bus->stream_list, list) {
-               if (s->direction == direction &&
-                   s->opened && s->stream_tag == stream_tag) {
+               if (!s->opened)
+                       continue;
+
+               if (s->direction == direction && s->stream_tag == stream_tag) {
                        s->opened = false;
-                       spin_unlock_irq(&bus->reg_lock);
-                       return 0;
+                       found = true;
+               } else if (s->direction == SNDRV_PCM_STREAM_CAPTURE) {
+                       active_capture_stream = true;
                }
        }
 
        spin_unlock_irq(&bus->reg_lock);
 
-       dev_dbg(sdev->dev, "stream_tag %d not opened!\n", stream_tag);
-       return -ENODEV;
+       /* Enable DMI L1 entry if there are no capture streams open */
+       if (!IS_ENABLED(SND_SOC_SOF_HDA_ALWAYS_ENABLE_DMI_L1))
+               if (!active_capture_stream)
+                       snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
+                                               HDA_VS_INTEL_EM2,
+                                               HDA_VS_INTEL_EM2_L1SEN,
+                                               HDA_VS_INTEL_EM2_L1SEN);
+
+       if (!found) {
+               dev_dbg(sdev->dev, "stream_tag %d not opened!\n", stream_tag);
+               return -ENODEV;
+       }
+
+       return 0;
 }
 
 int hda_dsp_stream_trigger(struct snd_sof_dev *sdev,
index c72e9a09eee16a03da41267f127fdf0dbfc1b95d..06e84679087bc6bcb81a9988195475e6f042a354 100644 (file)
@@ -35,6 +35,8 @@
 #define IS_CFL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa348)
 #define IS_CNL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9dc8)
 
+#define EXCEPT_MAX_HDR_SIZE    0x400
+
 /*
  * Debug
  */
@@ -131,6 +133,11 @@ static void hda_dsp_get_registers(struct snd_sof_dev *sdev,
        /* note: variable AR register array is not read */
 
        /* then get panic info */
+       if (xoops->arch_hdr.totalsize > EXCEPT_MAX_HDR_SIZE) {
+               dev_err(sdev->dev, "invalid header size 0x%x. FW oops is bogus\n",
+                       xoops->arch_hdr.totalsize);
+               return;
+       }
        offset += xoops->arch_hdr.totalsize;
        sof_block_read(sdev, sdev->mmio_bar, offset,
                       panic_info, sizeof(*panic_info));
index 5591841a1b6faf9a72aec8a94b9082e1561f1e75..23e430d3e056881853cc193e549ede0334b624da 100644 (file)
@@ -39,7 +39,6 @@
 #define SOF_HDA_WAKESTS                        0x0E
 #define SOF_HDA_WAKESTS_INT_MASK       ((1 << 8) - 1)
 #define SOF_HDA_RIRBSTS                        0x5d
-#define SOF_HDA_VS_EM2_L1SEN            BIT(13)
 
 /* SOF_HDA_GCTL register bits */
 #define SOF_HDA_GCTL_RESET             BIT(0)
 #define HDA_DSP_REG_HIPCIE             (HDA_DSP_IPC_BASE + 0x0C)
 #define HDA_DSP_REG_HIPCCTL            (HDA_DSP_IPC_BASE + 0x10)
 
+/* Intel Vendor Specific Registers */
+#define HDA_VS_INTEL_EM2               0x1030
+#define HDA_VS_INTEL_EM2_L1SEN         BIT(13)
+
 /*  HIPCI */
 #define HDA_DSP_REG_HIPCI_BUSY         BIT(31)
 #define HDA_DSP_REG_HIPCI_MSG_MASK     0x7FFFFFFF
index d7f32745fefe42a793baf869c33318b57a9e44a0..9a9a381a908dee467665c979fecc7d31fb58e3bc 100644 (file)
@@ -546,10 +546,10 @@ int snd_sof_run_firmware(struct snd_sof_dev *sdev)
                                 msecs_to_jiffies(sdev->boot_timeout));
        if (ret == 0) {
                dev_err(sdev->dev, "error: firmware boot failure\n");
-               /* after this point FW_READY msg should be ignored */
-               sdev->boot_complete = true;
                snd_sof_dsp_dbg_dump(sdev, SOF_DBG_REGS | SOF_DBG_MBOX |
                        SOF_DBG_TEXT | SOF_DBG_PCI);
+               /* after this point FW_READY msg should be ignored */
+               sdev->boot_complete = true;
                return -EIO;
        }
 
index e3f6a6dc0f368af0614c1080cbfb678f9e832958..2b876d4974476bd359d4f530cabfeb92776b3164 100644 (file)
@@ -244,7 +244,7 @@ static int sof_pcm_hw_free(struct snd_pcm_substream *substream)
                snd_soc_rtdcom_lookup(rtd, DRV_NAME);
        struct snd_sof_dev *sdev = snd_soc_component_get_drvdata(component);
        struct snd_sof_pcm *spcm;
-       int ret;
+       int ret, err = 0;
 
        /* nothing to do for BE */
        if (rtd->dai_link->no_pcm)
@@ -254,26 +254,26 @@ static int sof_pcm_hw_free(struct snd_pcm_substream *substream)
        if (!spcm)
                return -EINVAL;
 
-       if (!spcm->prepared[substream->stream])
-               return 0;
-
        dev_dbg(sdev->dev, "pcm: free stream %d dir %d\n", spcm->pcm.pcm_id,
                substream->stream);
 
-       ret = sof_pcm_dsp_pcm_free(substream, sdev, spcm);
+       if (spcm->prepared[substream->stream]) {
+               ret = sof_pcm_dsp_pcm_free(substream, sdev, spcm);
+               if (ret < 0)
+                       err = ret;
+       }
 
        snd_pcm_lib_free_pages(substream);
 
        cancel_work_sync(&spcm->stream[substream->stream].period_elapsed_work);
 
-       if (ret < 0)
-               return ret;
-
        ret = snd_sof_pcm_platform_hw_free(sdev, substream);
-       if (ret < 0)
+       if (ret < 0) {
                dev_err(sdev->dev, "error: platform hw free failed\n");
+               err = ret;
+       }
 
-       return ret;
+       return err;
 }
 
 static int sof_pcm_prepare(struct snd_pcm_substream *substream)
@@ -323,6 +323,7 @@ static int sof_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
        struct sof_ipc_stream stream;
        struct sof_ipc_reply reply;
        bool reset_hw_params = false;
+       bool ipc_first = false;
        int ret;
 
        /* nothing to do for BE */
@@ -343,6 +344,7 @@ static int sof_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
        switch (cmd) {
        case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
                stream.hdr.cmd |= SOF_IPC_STREAM_TRIG_PAUSE;
+               ipc_first = true;
                break;
        case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
                stream.hdr.cmd |= SOF_IPC_STREAM_TRIG_RELEASE;
@@ -363,6 +365,7 @@ static int sof_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
        case SNDRV_PCM_TRIGGER_SUSPEND:
        case SNDRV_PCM_TRIGGER_STOP:
                stream.hdr.cmd |= SOF_IPC_STREAM_TRIG_STOP;
+               ipc_first = true;
                reset_hw_params = true;
                break;
        default:
@@ -370,12 +373,22 @@ static int sof_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
                return -EINVAL;
        }
 
-       snd_sof_pcm_platform_trigger(sdev, substream, cmd);
+       /*
+        * DMA and IPC sequence is different for start and stop. Need to send
+        * STOP IPC before stop DMA
+        */
+       if (!ipc_first)
+               snd_sof_pcm_platform_trigger(sdev, substream, cmd);
 
        /* send IPC to the DSP */
        ret = sof_ipc_tx_message(sdev->ipc, stream.hdr.cmd, &stream,
                                 sizeof(stream), &reply, sizeof(reply));
 
+       /* need to STOP DMA even if STOP IPC failed */
+       if (ipc_first)
+               snd_sof_pcm_platform_trigger(sdev, substream, cmd);
+
+       /* free PCM if reset_hw_params is set and the STOP IPC is successful */
        if (!ret && reset_hw_params)
                ret = sof_pcm_dsp_pcm_free(substream, sdev, spcm);
 
index fc85efbad378c7308e4c410b5a9e6f6d3683c07b..0aabb3190ddc9735aebdbc494805a28f9b97d035 100644 (file)
@@ -920,7 +920,9 @@ static void sof_parse_word_tokens(struct snd_soc_component *scomp,
                for (j = 0; j < count; j++) {
                        /* match token type */
                        if (!(tokens[j].type == SND_SOC_TPLG_TUPLE_TYPE_WORD ||
-                             tokens[j].type == SND_SOC_TPLG_TUPLE_TYPE_SHORT))
+                             tokens[j].type == SND_SOC_TPLG_TUPLE_TYPE_SHORT ||
+                             tokens[j].type == SND_SOC_TPLG_TUPLE_TYPE_BYTE ||
+                             tokens[j].type == SND_SOC_TPLG_TUPLE_TYPE_BOOL))
                                continue;
 
                        /* match token id */
index d7501f88aaa630afb61fe87352eb1bcb287b9700..a4060813bc74c7b088bdebfa39dc1d624a2b3cc6 100644 (file)
@@ -505,10 +505,20 @@ static int stm32_sai_set_sysclk(struct snd_soc_dai *cpu_dai,
        if (dir == SND_SOC_CLOCK_OUT && sai->sai_mclk) {
                ret = regmap_update_bits(sai->regmap, STM_SAI_CR1_REGX,
                                         SAI_XCR1_NODIV,
-                                        (unsigned int)~SAI_XCR1_NODIV);
+                                        freq ? 0 : SAI_XCR1_NODIV);
                if (ret < 0)
                        return ret;
 
+               /* Assume shutdown if requested frequency is 0Hz */
+               if (!freq) {
+                       /* Release mclk rate only if rate was actually set */
+                       if (sai->mclk_rate) {
+                               clk_rate_exclusive_put(sai->sai_mclk);
+                               sai->mclk_rate = 0;
+                       }
+                       return 0;
+               }
+
                /* If master clock is used, set parent clock now */
                ret = stm32_sai_set_parent_clock(sai, freq);
                if (ret)
@@ -1093,15 +1103,6 @@ static void stm32_sai_shutdown(struct snd_pcm_substream *substream,
 
        regmap_update_bits(sai->regmap, STM_SAI_IMR_REGX, SAI_XIMR_MASK, 0);
 
-       regmap_update_bits(sai->regmap, STM_SAI_CR1_REGX, SAI_XCR1_NODIV,
-                          SAI_XCR1_NODIV);
-
-       /* Release mclk rate only if rate was actually set */
-       if (sai->mclk_rate) {
-               clk_rate_exclusive_put(sai->sai_mclk);
-               sai->mclk_rate = 0;
-       }
-
        clk_disable_unprepare(sai->sai_ck);
 
        spin_lock_irqsave(&sai->irq_lock, flags);
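With this change the exclusive claim on the master clock rate is dropped in set_sysclk() when the machine driver asks for 0 Hz, instead of unconditionally in shutdown(). The general get/put pairing looks roughly like the sketch below; the priv structure is hypothetical and only illustrates the bookkeeping, not the stm32 driver itself:

    #include <linux/clk.h>

    /* Hypothetical driver state, for illustration only. */
    struct example_priv {
            struct clk *mclk;
            unsigned long mclk_rate;
    };

    static int example_set_mclk(struct example_priv *priv, unsigned long freq)
    {
            int ret;

            if (!freq) {
                    /* Release the rate claim only if one was actually taken. */
                    if (priv->mclk_rate) {
                            clk_rate_exclusive_put(priv->mclk);
                            priv->mclk_rate = 0;
                    }
                    return 0;
            }

            /* Set the rate and protect it against other consumers in one go. */
            ret = clk_set_rate_exclusive(priv->mclk, freq);
            if (!ret)
                    priv->mclk_rate = freq;

            return ret;
    }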
index 33cd26763c0ee5d4509e156d43d83a6c50971988..ff5ab24f3bd1f36013ffd343b39139d48a5feff8 100644 (file)
@@ -348,6 +348,9 @@ static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs,
                ep = 0x84;
                ifnum = 0;
                goto add_sync_ep_from_ifnum;
+       case USB_ID(0x0582, 0x01d8): /* BOSS Katana */
+               /* BOSS Katana amplifiers do not need quirks */
+               return 0;
        }
 
        if (attr == USB_ENDPOINT_SYNC_ASYNC &&
index fbfde996fee76df28b56439c0030fb1cb0de7da3..0bbe1201a6ac8c57c7ae533e516c92fe67f832a7 100644 (file)
@@ -1657,6 +1657,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
        case 0x23ba:  /* Playback Designs */
        case 0x25ce:  /* Mytek devices */
        case 0x278b:  /* Rotel? */
+       case 0x292b:  /* Gustard/Ess based devices */
        case 0x2ab6:  /* T+A devices */
        case 0x3842:  /* EVGA */
        case 0xc502:  /* HiBy devices */
index 3c8f73a0eb12254385f9b2ed007bafc46ee74109..a5e584b60dcd83efd9b6732980e72e074e160c31 100644 (file)
@@ -75,7 +75,7 @@ static bool validate_processing_unit(const void *p,
 
        if (d->bLength < sizeof(*d))
                return false;
-       len = d->bLength < sizeof(*d) + d->bNrInPins;
+       len = sizeof(*d) + d->bNrInPins;
        if (d->bLength < len)
                return false;
        switch (v->protocol) {
index a4217c1a5d01178c53346ea3f48ed3415aa956fd..2769360f195cafe1ead78ea917495778a7a52c21 100644 (file)
@@ -266,8 +266,10 @@ struct kvm_vcpu_events {
 #define   KVM_DEV_ARM_ITS_CTRL_RESET           4
 
 /* KVM_IRQ_LINE irq field index values */
+#define KVM_ARM_IRQ_VCPU2_SHIFT                28
+#define KVM_ARM_IRQ_VCPU2_MASK         0xf
 #define KVM_ARM_IRQ_TYPE_SHIFT         24
-#define KVM_ARM_IRQ_TYPE_MASK          0xff
+#define KVM_ARM_IRQ_TYPE_MASK          0xf
 #define KVM_ARM_IRQ_VCPU_SHIFT         16
 #define KVM_ARM_IRQ_VCPU_MASK          0xff
 #define KVM_ARM_IRQ_NUM_SHIFT          0
index 9a507716ae2fe723624d7c7de3c1b88501d5f9a3..67c21f9bdbad2556be4114dfa09b8006a11e708e 100644 (file)
@@ -325,8 +325,10 @@ struct kvm_vcpu_events {
 #define   KVM_ARM_VCPU_TIMER_IRQ_PTIMER                1
 
 /* KVM_IRQ_LINE irq field index values */
+#define KVM_ARM_IRQ_VCPU2_SHIFT                28
+#define KVM_ARM_IRQ_VCPU2_MASK         0xf
 #define KVM_ARM_IRQ_TYPE_SHIFT         24
-#define KVM_ARM_IRQ_TYPE_MASK          0xff
+#define KVM_ARM_IRQ_TYPE_MASK          0xf
 #define KVM_ARM_IRQ_VCPU_SHIFT         16
 #define KVM_ARM_IRQ_VCPU_MASK          0xff
 #define KVM_ARM_IRQ_NUM_SHIFT          0
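Together with KVM_CAP_ARM_IRQ_LINE_LAYOUT_2 (added to kvm.h later in this merge), the new VCPU2 field extends the KVM_IRQ_LINE irq word so more than 256 vcpus can be addressed: bits 31:28 carry the high part of the vcpu index and the type field shrinks to four bits. A sketch of how userspace could pack the word with these masks; the helper name is made up for illustration:

    #include <stdint.h>
    #include <linux/kvm.h>

    /* Hypothetical helper: build the irq field passed to KVM_IRQ_LINE on arm64. */
    static uint32_t example_arm_irq_field(uint32_t type, uint32_t vcpu2,
                                          uint32_t vcpu, uint32_t num)
    {
            return ((vcpu2 & KVM_ARM_IRQ_VCPU2_MASK) << KVM_ARM_IRQ_VCPU2_SHIFT) |
                   ((type & KVM_ARM_IRQ_TYPE_MASK) << KVM_ARM_IRQ_TYPE_SHIFT) |
                   ((vcpu & KVM_ARM_IRQ_VCPU_MASK) << KVM_ARM_IRQ_VCPU_SHIFT) |
                   (num & KVM_ARM_IRQ_NUM_MASK);
    }

The resulting value goes into the irq member of struct kvm_irq_level for the KVM_IRQ_LINE ioctl.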
index 47104e5b47fd47b7941e3631e8e8eea4798e3d21..436ec7636927379ff40432478e9f175d886a1e7d 100644 (file)
@@ -231,6 +231,12 @@ struct kvm_guest_debug_arch {
 #define KVM_SYNC_GSCB   (1UL << 9)
 #define KVM_SYNC_BPBC   (1UL << 10)
 #define KVM_SYNC_ETOKEN (1UL << 11)
+
+#define KVM_SYNC_S390_VALID_FIELDS \
+       (KVM_SYNC_PREFIX | KVM_SYNC_GPRS | KVM_SYNC_ACRS | KVM_SYNC_CRS | \
+        KVM_SYNC_ARCH0 | KVM_SYNC_PFAULT | KVM_SYNC_VRS | KVM_SYNC_RICCB | \
+        KVM_SYNC_FPRS | KVM_SYNC_GSCB | KVM_SYNC_BPBC | KVM_SYNC_ETOKEN)
+
 /* length and alignment of the sdnx as a power of two */
 #define SDNXC 8
 #define SDNXL (1UL << SDNXC)
index a9731f8a480f3bca0785c94e870c74be6c191a67..2e8a30f06c74641cead87f4f460089f081c63ea7 100644 (file)
@@ -75,6 +75,7 @@
 #define SVM_EXIT_MWAIT         0x08b
 #define SVM_EXIT_MWAIT_COND    0x08c
 #define SVM_EXIT_XSETBV        0x08d
+#define SVM_EXIT_RDPRU         0x08e
 #define SVM_EXIT_NPF           0x400
 #define SVM_EXIT_AVIC_INCOMPLETE_IPI           0x401
 #define SVM_EXIT_AVIC_UNACCELERATED_ACCESS     0x402
index f0b0c90dd398246eb2882050d69c6b53ccca11af..3eb8411ab60efb14e4890c711569bfe3259c8687 100644 (file)
@@ -31,6 +31,7 @@
 #define EXIT_REASON_EXCEPTION_NMI       0
 #define EXIT_REASON_EXTERNAL_INTERRUPT  1
 #define EXIT_REASON_TRIPLE_FAULT        2
+#define EXIT_REASON_INIT_SIGNAL                        3
 
 #define EXIT_REASON_PENDING_INTERRUPT   7
 #define EXIT_REASON_NMI_WINDOW          8
 #define EXIT_REASON_PML_FULL            62
 #define EXIT_REASON_XSAVES              63
 #define EXIT_REASON_XRSTORS             64
+#define EXIT_REASON_UMWAIT              67
+#define EXIT_REASON_TPAUSE              68
 
 #define VMX_EXIT_REASONS \
        { EXIT_REASON_EXCEPTION_NMI,         "EXCEPTION_NMI" }, \
        { EXIT_REASON_EXTERNAL_INTERRUPT,    "EXTERNAL_INTERRUPT" }, \
        { EXIT_REASON_TRIPLE_FAULT,          "TRIPLE_FAULT" }, \
+       { EXIT_REASON_INIT_SIGNAL,           "INIT_SIGNAL" }, \
        { EXIT_REASON_PENDING_INTERRUPT,     "PENDING_INTERRUPT" }, \
        { EXIT_REASON_NMI_WINDOW,            "NMI_WINDOW" }, \
        { EXIT_REASON_TASK_SWITCH,           "TASK_SWITCH" }, \
        { EXIT_REASON_RDSEED,                "RDSEED" }, \
        { EXIT_REASON_PML_FULL,              "PML_FULL" }, \
        { EXIT_REASON_XSAVES,                "XSAVES" }, \
-       { EXIT_REASON_XRSTORS,               "XRSTORS" }
+       { EXIT_REASON_XRSTORS,               "XRSTORS" }, \
+       { EXIT_REASON_UMWAIT,                "UMWAIT" }, \
+       { EXIT_REASON_TPAUSE,                "TPAUSE" }
 
 #define VMX_ABORT_SAVE_GUEST_MSR_FAIL        1
 #define VMX_ABORT_LOAD_HOST_PDPTE_FAIL       2
index fbf5e4a0cb9c9cdb7c3c13c28e2d4cf3b5b90f3b..5d1995fd369c696d919240f66b9dad92ff4612ab 100644 (file)
@@ -12,7 +12,11 @@ INSTALL ?= install
 CFLAGS += -Wall -O2
 CFLAGS += -D__EXPORTED_HEADERS__ -I$(srctree)/include/uapi -I$(srctree)/include
 
-ifeq ($(srctree),)
+# This will work when bpf is built in tools env. where srctree
+# isn't set and when invoked from selftests build, where srctree
+# is set to ".". building_out_of_srctree is undefined for in srctree
+# builds
+ifndef building_out_of_srctree
 srctree := $(patsubst %/,%,$(dir $(CURDIR)))
 srctree := $(patsubst %/,%,$(dir $(srctree)))
 endif
index 6ecdd10678260f278475787af8d7b1e61e970bae..1178d302757ea71977e95d6880445839434fcf4b 100644 (file)
@@ -3,7 +3,11 @@ include ../scripts/Makefile.include
 
 bindir ?= /usr/bin
 
-ifeq ($(srctree),)
+# This will work when gpio is built in tools env. where srctree
+# isn't set and when invoked from selftests build, where srctree
+# is set to ".". building_out_of_srctree is undefined for in srctree
+# builds
+ifndef building_out_of_srctree
 srctree := $(patsubst %/,%,$(dir $(CURDIR)))
 srctree := $(patsubst %/,%,$(dir $(srctree)))
 endif
index 63b1f506ea678200d9433002e207ee9cf7996ad9..c160a5354eb62b3b17de564be439451c812470ae 100644 (file)
@@ -67,6 +67,9 @@
 #define MADV_WIPEONFORK 18             /* Zero memory on fork, child only */
 #define MADV_KEEPONFORK 19             /* Undo MADV_WIPEONFORK */
 
+#define MADV_COLD      20              /* deactivate these pages */
+#define MADV_PAGEOUT   21              /* reclaim these pages */
+
 /* compatibility flags */
 #define MAP_FILE       0
 
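MADV_COLD and MADV_PAGEOUT are new madvise(2) hints: MADV_COLD deactivates the pages so they are reclaimed first under memory pressure, while MADV_PAGEOUT starts reclaiming them right away; the mapping itself stays valid and later accesses fault the data back in. A minimal userspace sketch, assuming libc headers new enough to carry the constants, with error handling elided:

    #include <stddef.h>
    #include <sys/mman.h>

    static void example_drop_buffer(void *buf, size_t len)
    {
            /* Mark the range as cold: keep it mapped, reclaim it first. */
            madvise(buf, len, MADV_COLD);

            /* Or reclaim immediately; a later access pages the data back in. */
            madvise(buf, len, MADV_PAGEOUT);
    }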
index 328d05e77d9f6c6b3783dc67ba7316545bb9c0c3..469dc512cca3513f0a38400c5ed1598096147de9 100644 (file)
@@ -521,6 +521,7 @@ typedef struct drm_i915_irq_wait {
 #define   I915_SCHEDULER_CAP_PRIORITY  (1ul << 1)
 #define   I915_SCHEDULER_CAP_PREEMPTION        (1ul << 2)
 #define   I915_SCHEDULER_CAP_SEMAPHORES        (1ul << 3)
+#define   I915_SCHEDULER_CAP_ENGINE_BUSY_STATS (1ul << 4)
 
 #define I915_PARAM_HUC_STATUS           42
 
index 2a616aa3f6862da43e1e344439fd977442991de5..379a612f8f1d9f45d1703bc55a67fa677862b8d0 100644 (file)
@@ -13,6 +13,9 @@
 #include <linux/limits.h>
 #include <linux/ioctl.h>
 #include <linux/types.h>
+#ifndef __KERNEL__
+#include <linux/fscrypt.h>
+#endif
 
 /* Use of MS_* flags within the kernel is restricted to core mount(2) code. */
 #if !defined(__KERNEL__)
@@ -212,57 +215,6 @@ struct fsxattr {
 #define FS_IOC_GETFSLABEL              _IOR(0x94, 49, char[FSLABEL_MAX])
 #define FS_IOC_SETFSLABEL              _IOW(0x94, 50, char[FSLABEL_MAX])
 
-/*
- * File system encryption support
- */
-/* Policy provided via an ioctl on the topmost directory */
-#define FS_KEY_DESCRIPTOR_SIZE 8
-
-#define FS_POLICY_FLAGS_PAD_4          0x00
-#define FS_POLICY_FLAGS_PAD_8          0x01
-#define FS_POLICY_FLAGS_PAD_16         0x02
-#define FS_POLICY_FLAGS_PAD_32         0x03
-#define FS_POLICY_FLAGS_PAD_MASK       0x03
-#define FS_POLICY_FLAG_DIRECT_KEY      0x04    /* use master key directly */
-#define FS_POLICY_FLAGS_VALID          0x07
-
-/* Encryption algorithms */
-#define FS_ENCRYPTION_MODE_INVALID             0
-#define FS_ENCRYPTION_MODE_AES_256_XTS         1
-#define FS_ENCRYPTION_MODE_AES_256_GCM         2
-#define FS_ENCRYPTION_MODE_AES_256_CBC         3
-#define FS_ENCRYPTION_MODE_AES_256_CTS         4
-#define FS_ENCRYPTION_MODE_AES_128_CBC         5
-#define FS_ENCRYPTION_MODE_AES_128_CTS         6
-#define FS_ENCRYPTION_MODE_SPECK128_256_XTS    7 /* Removed, do not use. */
-#define FS_ENCRYPTION_MODE_SPECK128_256_CTS    8 /* Removed, do not use. */
-#define FS_ENCRYPTION_MODE_ADIANTUM            9
-
-struct fscrypt_policy {
-       __u8 version;
-       __u8 contents_encryption_mode;
-       __u8 filenames_encryption_mode;
-       __u8 flags;
-       __u8 master_key_descriptor[FS_KEY_DESCRIPTOR_SIZE];
-};
-
-#define FS_IOC_SET_ENCRYPTION_POLICY   _IOR('f', 19, struct fscrypt_policy)
-#define FS_IOC_GET_ENCRYPTION_PWSALT   _IOW('f', 20, __u8[16])
-#define FS_IOC_GET_ENCRYPTION_POLICY   _IOW('f', 21, struct fscrypt_policy)
-
-/* Parameters for passing an encryption key into the kernel keyring */
-#define FS_KEY_DESC_PREFIX             "fscrypt:"
-#define FS_KEY_DESC_PREFIX_SIZE                8
-
-/* Structure that userspace passes to the kernel keyring */
-#define FS_MAX_KEY_SIZE                        64
-
-struct fscrypt_key {
-       __u32 mode;
-       __u8 raw[FS_MAX_KEY_SIZE];
-       __u32 size;
-};
-
 /*
  * Inode flags (FS_IOC_GETFLAGS / FS_IOC_SETFLAGS)
  *
@@ -306,6 +258,7 @@ struct fscrypt_key {
 #define FS_TOPDIR_FL                   0x00020000 /* Top of directory hierarchies*/
 #define FS_HUGE_FILE_FL                        0x00040000 /* Reserved for ext4 */
 #define FS_EXTENT_FL                   0x00080000 /* Extents */
+#define FS_VERITY_FL                   0x00100000 /* Verity protected inode */
 #define FS_EA_INODE_FL                 0x00200000 /* Inode used for large EA */
 #define FS_EOFBLOCKS_FL                        0x00400000 /* Reserved for ext4 */
 #define FS_NOCOW_FL                    0x00800000 /* Do not cow file */
diff --git a/tools/include/uapi/linux/fscrypt.h b/tools/include/uapi/linux/fscrypt.h
new file mode 100644 (file)
index 0000000..39ccfe9
--- /dev/null
@@ -0,0 +1,181 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * fscrypt user API
+ *
+ * These ioctls can be used on filesystems that support fscrypt.  See the
+ * "User API" section of Documentation/filesystems/fscrypt.rst.
+ */
+#ifndef _UAPI_LINUX_FSCRYPT_H
+#define _UAPI_LINUX_FSCRYPT_H
+
+#include <linux/types.h>
+
+/* Encryption policy flags */
+#define FSCRYPT_POLICY_FLAGS_PAD_4             0x00
+#define FSCRYPT_POLICY_FLAGS_PAD_8             0x01
+#define FSCRYPT_POLICY_FLAGS_PAD_16            0x02
+#define FSCRYPT_POLICY_FLAGS_PAD_32            0x03
+#define FSCRYPT_POLICY_FLAGS_PAD_MASK          0x03
+#define FSCRYPT_POLICY_FLAG_DIRECT_KEY         0x04
+#define FSCRYPT_POLICY_FLAGS_VALID             0x07
+
+/* Encryption algorithms */
+#define FSCRYPT_MODE_AES_256_XTS               1
+#define FSCRYPT_MODE_AES_256_CTS               4
+#define FSCRYPT_MODE_AES_128_CBC               5
+#define FSCRYPT_MODE_AES_128_CTS               6
+#define FSCRYPT_MODE_ADIANTUM                  9
+#define __FSCRYPT_MODE_MAX                     9
+
+/*
+ * Legacy policy version; ad-hoc KDF and no key verification.
+ * For new encrypted directories, use fscrypt_policy_v2 instead.
+ *
+ * Careful: the .version field for this is actually 0, not 1.
+ */
+#define FSCRYPT_POLICY_V1              0
+#define FSCRYPT_KEY_DESCRIPTOR_SIZE    8
+struct fscrypt_policy_v1 {
+       __u8 version;
+       __u8 contents_encryption_mode;
+       __u8 filenames_encryption_mode;
+       __u8 flags;
+       __u8 master_key_descriptor[FSCRYPT_KEY_DESCRIPTOR_SIZE];
+};
+#define fscrypt_policy fscrypt_policy_v1
+
+/*
+ * Process-subscribed "logon" key description prefix and payload format.
+ * Deprecated; prefer FS_IOC_ADD_ENCRYPTION_KEY instead.
+ */
+#define FSCRYPT_KEY_DESC_PREFIX                "fscrypt:"
+#define FSCRYPT_KEY_DESC_PREFIX_SIZE   8
+#define FSCRYPT_MAX_KEY_SIZE           64
+struct fscrypt_key {
+       __u32 mode;
+       __u8 raw[FSCRYPT_MAX_KEY_SIZE];
+       __u32 size;
+};
+
+/*
+ * New policy version with HKDF and key verification (recommended).
+ */
+#define FSCRYPT_POLICY_V2              2
+#define FSCRYPT_KEY_IDENTIFIER_SIZE    16
+struct fscrypt_policy_v2 {
+       __u8 version;
+       __u8 contents_encryption_mode;
+       __u8 filenames_encryption_mode;
+       __u8 flags;
+       __u8 __reserved[4];
+       __u8 master_key_identifier[FSCRYPT_KEY_IDENTIFIER_SIZE];
+};
+
+/* Struct passed to FS_IOC_GET_ENCRYPTION_POLICY_EX */
+struct fscrypt_get_policy_ex_arg {
+       __u64 policy_size; /* input/output */
+       union {
+               __u8 version;
+               struct fscrypt_policy_v1 v1;
+               struct fscrypt_policy_v2 v2;
+       } policy; /* output */
+};
+
+/*
+ * v1 policy keys are specified by an arbitrary 8-byte key "descriptor",
+ * matching fscrypt_policy_v1::master_key_descriptor.
+ */
+#define FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR       1
+
+/*
+ * v2 policy keys are specified by a 16-byte key "identifier" which the kernel
+ * calculates as a cryptographic hash of the key itself,
+ * matching fscrypt_policy_v2::master_key_identifier.
+ */
+#define FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER       2
+
+/*
+ * Specifies a key, either for v1 or v2 policies.  This doesn't contain the
+ * actual key itself; this is just the "name" of the key.
+ */
+struct fscrypt_key_specifier {
+       __u32 type;     /* one of FSCRYPT_KEY_SPEC_TYPE_* */
+       __u32 __reserved;
+       union {
+               __u8 __reserved[32]; /* reserve some extra space */
+               __u8 descriptor[FSCRYPT_KEY_DESCRIPTOR_SIZE];
+               __u8 identifier[FSCRYPT_KEY_IDENTIFIER_SIZE];
+       } u;
+};
+
+/* Struct passed to FS_IOC_ADD_ENCRYPTION_KEY */
+struct fscrypt_add_key_arg {
+       struct fscrypt_key_specifier key_spec;
+       __u32 raw_size;
+       __u32 __reserved[9];
+       __u8 raw[];
+};
+
+/* Struct passed to FS_IOC_REMOVE_ENCRYPTION_KEY */
+struct fscrypt_remove_key_arg {
+       struct fscrypt_key_specifier key_spec;
+#define FSCRYPT_KEY_REMOVAL_STATUS_FLAG_FILES_BUSY     0x00000001
+#define FSCRYPT_KEY_REMOVAL_STATUS_FLAG_OTHER_USERS    0x00000002
+       __u32 removal_status_flags;     /* output */
+       __u32 __reserved[5];
+};
+
+/* Struct passed to FS_IOC_GET_ENCRYPTION_KEY_STATUS */
+struct fscrypt_get_key_status_arg {
+       /* input */
+       struct fscrypt_key_specifier key_spec;
+       __u32 __reserved[6];
+
+       /* output */
+#define FSCRYPT_KEY_STATUS_ABSENT              1
+#define FSCRYPT_KEY_STATUS_PRESENT             2
+#define FSCRYPT_KEY_STATUS_INCOMPLETELY_REMOVED        3
+       __u32 status;
+#define FSCRYPT_KEY_STATUS_FLAG_ADDED_BY_SELF   0x00000001
+       __u32 status_flags;
+       __u32 user_count;
+       __u32 __out_reserved[13];
+};
+
+#define FS_IOC_SET_ENCRYPTION_POLICY           _IOR('f', 19, struct fscrypt_policy)
+#define FS_IOC_GET_ENCRYPTION_PWSALT           _IOW('f', 20, __u8[16])
+#define FS_IOC_GET_ENCRYPTION_POLICY           _IOW('f', 21, struct fscrypt_policy)
+#define FS_IOC_GET_ENCRYPTION_POLICY_EX                _IOWR('f', 22, __u8[9]) /* size + version */
+#define FS_IOC_ADD_ENCRYPTION_KEY              _IOWR('f', 23, struct fscrypt_add_key_arg)
+#define FS_IOC_REMOVE_ENCRYPTION_KEY           _IOWR('f', 24, struct fscrypt_remove_key_arg)
+#define FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS _IOWR('f', 25, struct fscrypt_remove_key_arg)
+#define FS_IOC_GET_ENCRYPTION_KEY_STATUS       _IOWR('f', 26, struct fscrypt_get_key_status_arg)
+
+/**********************************************************************/
+
+/* old names; don't add anything new here! */
+#ifndef __KERNEL__
+#define FS_KEY_DESCRIPTOR_SIZE         FSCRYPT_KEY_DESCRIPTOR_SIZE
+#define FS_POLICY_FLAGS_PAD_4          FSCRYPT_POLICY_FLAGS_PAD_4
+#define FS_POLICY_FLAGS_PAD_8          FSCRYPT_POLICY_FLAGS_PAD_8
+#define FS_POLICY_FLAGS_PAD_16         FSCRYPT_POLICY_FLAGS_PAD_16
+#define FS_POLICY_FLAGS_PAD_32         FSCRYPT_POLICY_FLAGS_PAD_32
+#define FS_POLICY_FLAGS_PAD_MASK       FSCRYPT_POLICY_FLAGS_PAD_MASK
+#define FS_POLICY_FLAG_DIRECT_KEY      FSCRYPT_POLICY_FLAG_DIRECT_KEY
+#define FS_POLICY_FLAGS_VALID          FSCRYPT_POLICY_FLAGS_VALID
+#define FS_ENCRYPTION_MODE_INVALID     0       /* never used */
+#define FS_ENCRYPTION_MODE_AES_256_XTS FSCRYPT_MODE_AES_256_XTS
+#define FS_ENCRYPTION_MODE_AES_256_GCM 2       /* never used */
+#define FS_ENCRYPTION_MODE_AES_256_CBC 3       /* never used */
+#define FS_ENCRYPTION_MODE_AES_256_CTS FSCRYPT_MODE_AES_256_CTS
+#define FS_ENCRYPTION_MODE_AES_128_CBC FSCRYPT_MODE_AES_128_CBC
+#define FS_ENCRYPTION_MODE_AES_128_CTS FSCRYPT_MODE_AES_128_CTS
+#define FS_ENCRYPTION_MODE_SPECK128_256_XTS    7       /* removed */
+#define FS_ENCRYPTION_MODE_SPECK128_256_CTS    8       /* removed */
+#define FS_ENCRYPTION_MODE_ADIANTUM    FSCRYPT_MODE_ADIANTUM
+#define FS_KEY_DESC_PREFIX             FSCRYPT_KEY_DESC_PREFIX
+#define FS_KEY_DESC_PREFIX_SIZE                FSCRYPT_KEY_DESC_PREFIX_SIZE
+#define FS_MAX_KEY_SIZE                        FSCRYPT_MAX_KEY_SIZE
+#endif /* !__KERNEL__ */
+
+#endif /* _UAPI_LINUX_FSCRYPT_H */
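This copy of the fscrypt UAPI into tools/ lets the tooling build against the new definitions without relying on installed system headers. As a rough userspace sketch of the new extensible ioctl, the snippet below queries a directory's encryption policy; the path and error handling are illustrative only:

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/fscrypt.h>

    static int example_show_policy(const char *dir)
    {
            struct fscrypt_get_policy_ex_arg arg;
            int fd = open(dir, O_RDONLY);

            if (fd < 0)
                    return -1;

            memset(&arg, 0, sizeof(arg));
            arg.policy_size = sizeof(arg.policy);   /* in: buffer space provided */

            if (ioctl(fd, FS_IOC_GET_ENCRYPTION_POLICY_EX, &arg) == 0)
                    printf("%s: fscrypt policy version %u\n", dir, arg.policy.version);

            close(fd);
            return 0;
    }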
index 5e3f12d5359e731290e499c0733e9b6fb496158e..52641d8ca9e83f25b983f3cc6be115c37bad2d98 100644 (file)
@@ -243,6 +243,8 @@ struct kvm_hyperv_exit {
 #define KVM_INTERNAL_ERROR_SIMUL_EX    2
 /* Encounter unexpected vm-exit due to delivery event. */
 #define KVM_INTERNAL_ERROR_DELIVERY_EV 3
+/* Encounter unexpected vm-exit reason */
+#define KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON      4
 
 /* for KVM_RUN, returned by mmap(vcpu_fd, offset=0) */
 struct kvm_run {
@@ -996,6 +998,8 @@ struct kvm_ppc_resize_hpt {
 #define KVM_CAP_ARM_PTRAUTH_ADDRESS 171
 #define KVM_CAP_ARM_PTRAUTH_GENERIC 172
 #define KVM_CAP_PMU_EVENT_FILTER 173
+#define KVM_CAP_ARM_IRQ_LINE_LAYOUT_2 174
+#define KVM_CAP_HYPERV_DIRECT_TLBFLUSH 175
 
 #ifdef KVM_CAP_IRQ_ROUTING
 
@@ -1142,6 +1146,7 @@ struct kvm_dirty_tlb {
 #define KVM_REG_S390           0x5000000000000000ULL
 #define KVM_REG_ARM64          0x6000000000000000ULL
 #define KVM_REG_MIPS           0x7000000000000000ULL
+#define KVM_REG_RISCV          0x8000000000000000ULL
 
 #define KVM_REG_SIZE_SHIFT     52
 #define KVM_REG_SIZE_MASK      0x00f0000000000000ULL
index b3105ac1381a8d8651f7f2fbe6b700bd745ead01..99335e1f4a275b1dbc86219a6dee7701b18fd624 100644 (file)
 #define CLONE_NEWNET           0x40000000      /* New network namespace */
 #define CLONE_IO               0x80000000      /* Clone io context */
 
-/*
- * Arguments for the clone3 syscall
+#ifndef __ASSEMBLY__
+/**
+ * struct clone_args - arguments for the clone3 syscall
+ * @flags:       Flags for the new process as listed above.
+ *               All flags are valid except for CSIGNAL and
+ *               CLONE_DETACHED.
+ * @pidfd:       If CLONE_PIDFD is set, a pidfd will be
+ *               returned in this argument.
+ * @child_tid:   If CLONE_CHILD_SETTID is set, the TID of the
+ *               child process will be returned in the child's
+ *               memory.
+ * @parent_tid:  If CLONE_PARENT_SETTID is set, the TID of
+ *               the child process will be returned in the
+ *               parent's memory.
+ * @exit_signal: The exit_signal the parent process will be
+ *               sent when the child exits.
+ * @stack:       Specify the location of the stack for the
+ *               child process.
+ * @stack_size:  The size of the stack for the child process.
+ * @tls:         If CLONE_SETTLS is set, the tls descriptor
+ *               is set to tls.
+ *
+ * The structure is versioned by size and thus extensible.
+ * New struct members must go at the end of the struct and
+ * must be properly 64bit aligned.
  */
 struct clone_args {
        __aligned_u64 flags;
@@ -46,6 +69,9 @@ struct clone_args {
        __aligned_u64 stack_size;
        __aligned_u64 tls;
 };
+#endif
+
+#define CLONE_ARGS_SIZE_VER0 64 /* sizeof first published struct */
 
 /*
  * Scheduling policies
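Since struct clone_args is versioned by size, a caller passes both the struct and sizeof(struct clone_args) to the syscall, and CLONE_ARGS_SIZE_VER0 names the size of this first published layout. A hedged sketch of a raw clone3 call; it assumes toolchain headers new enough to provide __NR_clone3 and the UAPI struct, since glibc offered no wrapper at the time:

    #include <signal.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <sys/types.h>
    #include <linux/sched.h>        /* struct clone_args, CLONE_* flags */

    static pid_t example_clone3(void)
    {
            struct clone_args args;

            memset(&args, 0, sizeof(args));
            args.exit_signal = SIGCHLD;

            /* The size argument tells the kernel which struct revision we know. */
            return (pid_t)syscall(__NR_clone3, &args, sizeof(struct clone_args));
    }

As with fork(), the call returns 0 in the child and the child's PID in the parent.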
index 78efe870c2b7c85c9814ac90c04e137618bfd866..cf525cddeb94dc2cd2c53355ecdfbfebf095c168 100644 (file)
@@ -158,6 +158,7 @@ struct usbdevfs_hub_portinfo {
 #define USBDEVFS_CAP_MMAP                      0x20
 #define USBDEVFS_CAP_DROP_PRIVILEGES           0x40
 #define USBDEVFS_CAP_CONNINFO_EX               0x80
+#define USBDEVFS_CAP_SUSPEND                   0x100
 
 /* USBDEVFS_DISCONNECT_CLAIM flags & struct */
 
@@ -223,5 +224,8 @@ struct usbdevfs_streams {
  * extending size of the data returned.
  */
 #define USBDEVFS_CONNINFO_EX(len)  _IOC(_IOC_READ, 'U', 32, len)
+#define USBDEVFS_FORBID_SUSPEND    _IO('U', 33)
+#define USBDEVFS_ALLOW_SUSPEND     _IO('U', 34)
+#define USBDEVFS_WAIT_FOR_RESUME   _IO('U', 35)
 
 #endif /* _UAPI_LINUX_USBDEVICE_FS_H */
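USBDEVFS_CAP_SUSPEND advertises the matching new ioctls, which let a usbfs client keep an open device from being suspended, re-allow suspend, and wait until the device has resumed. A rough sketch of the forbid/allow pair; the device path is purely illustrative and error handling is elided:

    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/usbdevice_fs.h>

    static void example_hold_awake(const char *devpath)
    {
            int fd = open(devpath, O_RDWR);         /* e.g. a /dev/bus/usb/BBB/DDD node */

            if (fd < 0)
                    return;

            ioctl(fd, USBDEVFS_FORBID_SUSPEND);     /* keep the device awake */
            /* ... talk to the device ... */
            ioctl(fd, USBDEVFS_ALLOW_SUSPEND);      /* allow runtime suspend again */

            close(fd);
    }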
index c6f94cffe06e106549b835e9970b6aeeb577c9a8..56ce6292071b1d3575c26aafaea7967004d5a4cc 100644 (file)
@@ -8,7 +8,11 @@ LIBBPF_MAJOR_VERSION := $(firstword $(subst ., ,$(LIBBPF_VERSION)))
 
 MAKEFLAGS += --no-print-directory
 
-ifeq ($(srctree),)
+# This will work when bpf is built in tools env. where srctree
+# isn't set and when invoked from selftests build, where srctree
+# is a ".". building_out_of_srctree is undefined for in srctree
+# builds
+ifndef building_out_of_srctree
 srctree := $(patsubst %/,%,$(dir $(CURDIR)))
 srctree := $(patsubst %/,%,$(dir $(srctree)))
 srctree := $(patsubst %/,%,$(dir $(srctree)))
@@ -110,6 +114,9 @@ override CFLAGS += $(INCLUDES)
 override CFLAGS += -fvisibility=hidden
 override CFLAGS += -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64
 
+# flags specific for shared library
+SHLIB_FLAGS := -DSHARED
+
 ifeq ($(VERBOSE),1)
   Q =
 else
@@ -126,14 +133,17 @@ all:
 export srctree OUTPUT CC LD CFLAGS V
 include $(srctree)/tools/build/Makefile.include
 
-BPF_IN         := $(OUTPUT)libbpf-in.o
+SHARED_OBJDIR  := $(OUTPUT)sharedobjs/
+STATIC_OBJDIR  := $(OUTPUT)staticobjs/
+BPF_IN_SHARED  := $(SHARED_OBJDIR)libbpf-in.o
+BPF_IN_STATIC  := $(STATIC_OBJDIR)libbpf-in.o
 VERSION_SCRIPT := libbpf.map
 
 LIB_TARGET     := $(addprefix $(OUTPUT),$(LIB_TARGET))
 LIB_FILE       := $(addprefix $(OUTPUT),$(LIB_FILE))
 PC_FILE                := $(addprefix $(OUTPUT),$(PC_FILE))
 
-GLOBAL_SYM_COUNT = $(shell readelf -s --wide $(BPF_IN) | \
+GLOBAL_SYM_COUNT = $(shell readelf -s --wide $(BPF_IN_SHARED) | \
                           cut -d "@" -f1 | sed 's/_v[0-9]_[0-9]_[0-9].*//' | \
                           awk '/GLOBAL/ && /DEFAULT/ && !/UND/ {print $$8}' | \
                           sort -u | wc -l)
@@ -155,7 +165,7 @@ all: fixdep
 
 all_cmd: $(CMD_TARGETS) check
 
-$(BPF_IN): force elfdep bpfdep
+$(BPF_IN_SHARED): force elfdep bpfdep
        @(test -f ../../include/uapi/linux/bpf.h -a -f ../../../include/uapi/linux/bpf.h && ( \
        (diff -B ../../include/uapi/linux/bpf.h ../../../include/uapi/linux/bpf.h >/dev/null) || \
        echo "Warning: Kernel ABI header at 'tools/include/uapi/linux/bpf.h' differs from latest version at 'include/uapi/linux/bpf.h'" >&2 )) || true
@@ -171,17 +181,20 @@ $(BPF_IN): force elfdep bpfdep
        @(test -f ../../include/uapi/linux/if_xdp.h -a -f ../../../include/uapi/linux/if_xdp.h && ( \
        (diff -B ../../include/uapi/linux/if_xdp.h ../../../include/uapi/linux/if_xdp.h >/dev/null) || \
        echo "Warning: Kernel ABI header at 'tools/include/uapi/linux/if_xdp.h' differs from latest version at 'include/uapi/linux/if_xdp.h'" >&2 )) || true
-       $(Q)$(MAKE) $(build)=libbpf
+       $(Q)$(MAKE) $(build)=libbpf OUTPUT=$(SHARED_OBJDIR) CFLAGS="$(CFLAGS) $(SHLIB_FLAGS)"
+
+$(BPF_IN_STATIC): force elfdep bpfdep
+       $(Q)$(MAKE) $(build)=libbpf OUTPUT=$(STATIC_OBJDIR)
 
 $(OUTPUT)libbpf.so: $(OUTPUT)libbpf.so.$(LIBBPF_VERSION)
 
-$(OUTPUT)libbpf.so.$(LIBBPF_VERSION): $(BPF_IN)
+$(OUTPUT)libbpf.so.$(LIBBPF_VERSION): $(BPF_IN_SHARED)
        $(QUIET_LINK)$(CC) --shared -Wl,-soname,libbpf.so.$(LIBBPF_MAJOR_VERSION) \
                                    -Wl,--version-script=$(VERSION_SCRIPT) $^ -lelf -o $@
        @ln -sf $(@F) $(OUTPUT)libbpf.so
        @ln -sf $(@F) $(OUTPUT)libbpf.so.$(LIBBPF_MAJOR_VERSION)
 
-$(OUTPUT)libbpf.a: $(BPF_IN)
+$(OUTPUT)libbpf.a: $(BPF_IN_STATIC)
        $(QUIET_LINK)$(RM) $@; $(AR) rcs $@ $^
 
 $(OUTPUT)test_libbpf: test_libbpf.cpp $(OUTPUT)libbpf.a
@@ -197,7 +210,7 @@ check: check_abi
 
 check_abi: $(OUTPUT)libbpf.so
        @if [ "$(GLOBAL_SYM_COUNT)" != "$(VERSIONED_SYM_COUNT)" ]; then  \
-               echo "Warning: Num of global symbols in $(BPF_IN)"       \
+               echo "Warning: Num of global symbols in $(BPF_IN_SHARED)"        \
                     "($(GLOBAL_SYM_COUNT)) does NOT match with num of"  \
                     "versioned symbols in $^ ($(VERSIONED_SYM_COUNT))." \
                     "Please make sure all LIBBPF_API symbols are"       \
@@ -255,9 +268,9 @@ config-clean:
        $(Q)$(MAKE) -C $(srctree)/tools/build/feature/ clean >/dev/null
 
 clean:
-       $(call QUIET_CLEAN, libbpf) $(RM) $(TARGETS) $(CXX_TEST_TARGET) \
+       $(call QUIET_CLEAN, libbpf) $(RM) -rf $(TARGETS) $(CXX_TEST_TARGET) \
                *.o *~ *.a *.so *.so.$(LIBBPF_MAJOR_VERSION) .*.d .*.cmd \
-               *.pc LIBBPF-CFLAGS
+               *.pc LIBBPF-CFLAGS $(SHARED_OBJDIR) $(STATIC_OBJDIR)
        $(call QUIET_CLEAN, core-gen) $(RM) $(OUTPUT)FEATURE-DUMP.libbpf
 
 
index 2e83a34f8c79a7e018a65ad6cdcd0ea3dc5b4764..98216a69c32f0d42ce4702c515c4881568243229 100644 (file)
        (offsetof(TYPE, FIELD) + sizeof(((TYPE *)0)->FIELD))
 #endif
 
+/* Symbol versioning is handled differently for the static and shared
+ * libraries: the shared library needs properly versioned symbols, while
+ * the static library only needs the symbol of the newest version.
+ */
+#ifdef SHARED
+# define COMPAT_VERSION(internal_name, api_name, version) \
+       asm(".symver " #internal_name "," #api_name "@" #version);
+# define DEFAULT_VERSION(internal_name, api_name, version) \
+       asm(".symver " #internal_name "," #api_name "@@" #version);
+#else
+# define COMPAT_VERSION(internal_name, api_name, version)
+# define DEFAULT_VERSION(internal_name, api_name, version) \
+       extern typeof(internal_name) api_name \
+       __attribute__((alias(#internal_name)));
+#endif
+
 extern void libbpf_print(enum libbpf_print_level level,
                         const char *format, ...)
        __attribute__((format(printf, 2, 3)));
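
Illustrative only: with the two macros above in scope (i.e. with this internal header included), adding a new default version of a hypothetical libbpf API could look like the sketch below. The example_api names and LIBBPF_0.0.x version nodes are made up; in the shared build those versions must also exist in libbpf.map for the .symver directives to resolve.

    /* assumes the COMPAT_VERSION/DEFAULT_VERSION macros above are in scope */

    /* old ABI, kept for binaries linked against the previous release */
    int example_api_v1(int arg) { return arg; }

    /* current ABI with an extra parameter */
    int example_api_v2(int arg, int flags) { return arg + flags; }

    COMPAT_VERSION(example_api_v1, example_api, LIBBPF_0.0.1)  /* example_api@LIBBPF_0.0.1, shared build only */
    DEFAULT_VERSION(example_api_v2, example_api, LIBBPF_0.0.2) /* example_api@@LIBBPF_0.0.2 in the .so;
                                                                * a plain alias in the static archive */
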
index 24fa313524fb216fa9390725c99fc4d0766c1564..a902838f9fccd37b63554363b5308729f7871bd3 100644 (file)
@@ -261,8 +261,8 @@ int xsk_umem__create_v0_0_2(struct xsk_umem **umem_ptr, void *umem_area,
        return xsk_umem__create_v0_0_4(umem_ptr, umem_area, size, fill, comp,
                                        &config);
 }
-asm(".symver xsk_umem__create_v0_0_2, xsk_umem__create@LIBBPF_0.0.2");
-asm(".symver xsk_umem__create_v0_0_4, xsk_umem__create@@LIBBPF_0.0.4");
+COMPAT_VERSION(xsk_umem__create_v0_0_2, xsk_umem__create, LIBBPF_0.0.2)
+DEFAULT_VERSION(xsk_umem__create_v0_0_4, xsk_umem__create, LIBBPF_0.0.4)
 
 static int xsk_load_xdp_prog(struct xsk_socket *xsk)
 {
index ed61fb3a46c08e8c87daa1e59b549375139c226f..5b2cd5e58df09d11c94a939173ab2fce7826bb22 100644 (file)
@@ -20,7 +20,13 @@ MAKEFLAGS += --no-print-directory
 LIBFILE = $(OUTPUT)libsubcmd.a
 
 CFLAGS := $(EXTRA_WARNINGS) $(EXTRA_CFLAGS)
-CFLAGS += -ggdb3 -Wall -Wextra -std=gnu99 -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=2 -fPIC
+CFLAGS += -ggdb3 -Wall -Wextra -std=gnu99 -fPIC
+
+ifeq ($(DEBUG),0)
+  ifeq ($(feature-fortify-source), 1)
+    CFLAGS += -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=2
+  endif
+endif
 
 ifeq ($(CC_NO_CLANG), 0)
   CFLAGS += -O3
index 356b23a40339871321fe0c74b4230767335cb253..2b62ba1e72b75711e50782a574c207c41640ee82 100644 (file)
@@ -71,6 +71,9 @@ ifdef::backend-docbook[]
 [header]
 template::[header-declarations]
 <refentry>
+ifdef::perf_date[]
+<refentryinfo><date>{perf_date}</date></refentryinfo>
+endif::perf_date[]
 <refmeta>
 <refentrytitle>{mantitle}</refentrytitle>
 <manvolnum>{manvolnum}</manvolnum>
index 4c62b07136517d7808b314d48e06f78567ce060d..52152d156ad907e22c430e7ac48ef64c42f6032f 100644 (file)
@@ -36,8 +36,8 @@ III/ Jitdump file header format
 Each jitdump file starts with a fixed size header containing the following fields in order:
 
 
-* uint32_t magic     : a magic number tagging the file type. The value is 4-byte long and represents the string "JiTD" in ASCII form. It is 0x4A695444 or 0x4454694a depending on the endianness. The field can be used to detect the endianness of the file
-* uint32_t version   : a 4-byte value representing the format version. It is currently set to 2
+* uint32_t magic     : a magic number tagging the file type. The value is 4-byte long and represents the string "JiTD" in ASCII form. It is written as 0x4A695444. The reader will detect an endian mismatch when it reads 0x4454694a.
+* uint32_t version   : a 4-byte value representing the format version. It is currently set to 1
 * uint32_t total_size: size in bytes of file header
 * uint32_t elf_mach  : ELF architecture encoding (ELF e_machine value as specified in /usr/include/elf.h)
 * uint32_t pad1      : padding. Reserved for future use
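
As a reading aid (not part of the patch), the header fields listed in this hunk map onto a struct like the following. The struct name is hypothetical and the real jitdump header continues with further fields that this hunk does not show, so this is a deliberately partial sketch.

    #include <stdint.h>

    struct jitdump_header_prefix {      /* hypothetical name, fields per the text above */
        uint32_t magic;                 /* "JiTD": written as 0x4A695444; reading
                                         * 0x4454694a means opposite endianness */
        uint32_t version;               /* format version described above */
        uint32_t total_size;            /* size in bytes of the file header */
        uint32_t elf_mach;              /* ELF e_machine of the jitted code */
        uint32_t pad1;                  /* padding, reserved for future use */
        /* ... remaining header fields omitted here ... */
    };
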
index e1d4b484cc4bf4cce60fad06e28cd67c8b754a9b..2ff6cedeb9c53715f4c4f8280bed959fd2be3202 100644 (file)
@@ -37,7 +37,7 @@ static int arm__annotate_init(struct arch *arch, char *cpuid __maybe_unused)
 
        arm = zalloc(sizeof(*arm));
        if (!arm)
-               return -1;
+               return ENOMEM;
 
 #define ARM_CONDS "(cc|cs|eq|ge|gt|hi|le|ls|lt|mi|ne|pl|vc|vs)"
        err = regcomp(&arm->call_insn, "^blx?" ARM_CONDS "?$", REG_EXTENDED);
@@ -59,5 +59,5 @@ out_free_call:
        regfree(&arm->call_insn);
 out_free_arm:
        free(arm);
-       return -1;
+       return SYMBOL_ANNOTATE_ERRNO__ARCH_INIT_REGEXP;
 }
index 43aa93ed8414d2649b4321a7c5886240d167dc9b..037e292ecd8eb0a6c3bb883bcb3c352f92a75491 100644 (file)
@@ -95,7 +95,7 @@ static int arm64__annotate_init(struct arch *arch, char *cpuid __maybe_unused)
 
        arm = zalloc(sizeof(*arm));
        if (!arm)
-               return -1;
+               return ENOMEM;
 
        /* bl, blr */
        err = regcomp(&arm->call_insn, "^blr?$", REG_EXTENDED);
@@ -118,5 +118,5 @@ out_free_call:
        regfree(&arm->call_insn);
 out_free_arm:
        free(arm);
-       return -1;
+       return SYMBOL_ANNOTATE_ERRNO__ARCH_INIT_REGEXP;
 }
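
The recurring change in this hunk and the similar ones that follow is to return a positive errno-style value (or a perf-specific SYMBOL_ANNOTATE_ERRNO__* code) instead of a bare -1, so callers can turn the result into a message. A generic, hypothetical sketch of that convention, not perf code, using plain strerror() where perf itself uses the thread-safe str_error_r():

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* returns 0 on success, a positive errno-style code on failure */
    static int init_thing(void **out)
    {
        void *p = malloc(128);

        if (!p)
            return ENOMEM;
        *out = p;
        return 0;
    }

    int main(void)
    {
        void *thing;
        int err = init_thing(&thing);

        if (err) {
            fprintf(stderr, "init failed: %s\n", strerror(err));
            return 1;
        }
        free(thing);
        return 0;
    }
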
index b6b7bc7e31a173ccb8a2d52ae76522fdda206894..3b4cdfc5efd65bce12d74b125f5c6dfd85af639e 100644 (file)
@@ -1,5 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 #include <sys/types.h>
+#include <errno.h>
 #include <unistd.h>
 #include <stdio.h>
 #include <stdlib.h>
@@ -30,7 +31,7 @@ get_cpuid(char *buffer, size_t sz)
                buffer[nb-1] = '\0';
                return 0;
        }
-       return -1;
+       return ENOBUFS;
 }
 
 char *
index 89bb8f2c54ceec6da1647a6121b175bc8f1c5048..a50e70baf91839a2edf53b9ec5be910c5dc2e618 100644 (file)
@@ -164,8 +164,10 @@ static int s390__annotate_init(struct arch *arch, char *cpuid __maybe_unused)
        if (!arch->initialized) {
                arch->initialized = true;
                arch->associate_instruction_ops = s390__associate_ins_ops;
-               if (cpuid)
-                       err = s390__cpuid_parse(arch, cpuid);
+               if (cpuid) {
+                       if (s390__cpuid_parse(arch, cpuid))
+                               err = SYMBOL_ANNOTATE_ERRNO__ARCH_INIT_CPUID_PARSING;
+               }
        }
 
        return err;
index 8b0b018d896ab30f798d9904a247d9613593d89b..7933f6871c8180e73a5fb33b57e14a65cbffda5e 100644 (file)
@@ -8,6 +8,7 @@
  */
 
 #include <sys/types.h>
+#include <errno.h>
 #include <unistd.h>
 #include <stdio.h>
 #include <string.h>
@@ -54,7 +55,7 @@ int get_cpuid(char *buffer, size_t sz)
 
        sysinfo = fopen(SYSINFO, "r");
        if (sysinfo == NULL)
-               return -1;
+               return errno;
 
        while ((read = getline(&line, &line_sz, sysinfo)) != -1) {
                if (!strncmp(line, SYSINFO_MANU, strlen(SYSINFO_MANU))) {
@@ -89,7 +90,7 @@ int get_cpuid(char *buffer, size_t sz)
 
        /* Missing manufacturer, type or model information should not happen */
        if (!manufacturer[0] || !type[0] || !model[0])
-               return -1;
+               return EINVAL;
 
        /*
         * Scan /proc/service_levels and return the CPU-MF counter facility
@@ -133,14 +134,14 @@ skip_sysinfo:
        else
                nbytes = snprintf(buffer, sz, "%s,%s,%s", manufacturer, type,
                                  model);
-       return (nbytes >= sz) ? -1 : 0;
+       return (nbytes >= sz) ? ENOBUFS : 0;
 }
 
 char *get_cpuid_str(struct perf_pmu *pmu __maybe_unused)
 {
        char *buf = malloc(128);
 
-       if (buf && get_cpuid(buf, 128) < 0)
+       if (buf && get_cpuid(buf, 128))
                zfree(&buf);
        return buf;
 }
index 44f5aba78210e9892bedb25590a8983c1e5ae2d0..7eb5621c021d8d3276110e4123dd027a933f9b76 100644 (file)
@@ -196,8 +196,10 @@ static int x86__annotate_init(struct arch *arch, char *cpuid)
        if (arch->initialized)
                return 0;
 
-       if (cpuid)
-               err = x86__cpuid_parse(arch, cpuid);
+       if (cpuid) {
+               if (x86__cpuid_parse(arch, cpuid))
+                       err = SYMBOL_ANNOTATE_ERRNO__ARCH_INIT_CPUID_PARSING;
+       }
 
        arch->initialized = true;
        return err;
index 662ecf84a421b1d79273851eef806049c13fc2f4..aa6deb463bf3c7407fd47cde1edcd933c68c42ff 100644 (file)
@@ -1,5 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 #include <sys/types.h>
+#include <errno.h>
 #include <unistd.h>
 #include <stdio.h>
 #include <stdlib.h>
@@ -58,7 +59,7 @@ __get_cpuid(char *buffer, size_t sz, const char *fmt)
                buffer[nb-1] = '\0';
                return 0;
        }
-       return -1;
+       return ENOBUFS;
 }
 
 int
index 3542b6ab9813b5bf7d79a95aafbb1400cef13c2a..e69f44941aad9a7bed7f4a3cbc0d218f84985081 100644 (file)
@@ -2635,6 +2635,7 @@ static int build_cl_output(char *cl_sort, bool no_source)
        bool add_sym   = false;
        bool add_dso   = false;
        bool add_src   = false;
+       int ret = 0;
 
        if (!buf)
                return -ENOMEM;
@@ -2653,7 +2654,8 @@ static int build_cl_output(char *cl_sort, bool no_source)
                        add_dso = true;
                } else if (strcmp(tok, "offset")) {
                        pr_err("unrecognized sort token: %s\n", tok);
-                       return -EINVAL;
+                       ret = -EINVAL;
+                       goto err;
                }
        }
 
@@ -2676,13 +2678,15 @@ static int build_cl_output(char *cl_sort, bool no_source)
                add_sym ? "symbol," : "",
                add_dso ? "dso," : "",
                add_src ? "cl_srcline," : "",
-               "node") < 0)
-               return -ENOMEM;
+               "node") < 0) {
+               ret = -ENOMEM;
+               goto err;
+       }
 
        c2c.show_src = add_src;
-
+err:
        free(buf);
-       return 0;
+       return ret;
 }
 
 static int setup_coalesce(const char *coalesce, bool no_source)
index 1e61e353f579bc6e6910eb9906485f2115eac69d..9661671cc26ec29029b895aa3a4c630a31e65cd0 100644 (file)
@@ -691,6 +691,7 @@ static char *compact_gfp_flags(char *gfp_flags)
                        new = realloc(new_flags, len + strlen(cpt) + 2);
                        if (new == NULL) {
                                free(new_flags);
+                               free(orig_flags);
                                return NULL;
                        }
 
index 2227e2f42c09d8e72243afa2fe892230b4417c4f..58a9e09894913741c47e129dcbb32fdd67042873 100644 (file)
@@ -705,14 +705,15 @@ static int process_sample_event(struct perf_tool *tool,
 
 static int cpu_isa_config(struct perf_kvm_stat *kvm)
 {
-       char buf[64], *cpuid;
+       char buf[128], *cpuid;
        int err;
 
        if (kvm->live) {
                err = get_cpuid(buf, sizeof(buf));
                if (err != 0) {
-                       pr_err("Failed to look up CPU type\n");
-                       return err;
+                       pr_err("Failed to look up CPU type: %s\n",
+                              str_error_r(err, buf, sizeof(buf)));
+                       return -err;
                }
                cpuid = buf;
        } else
index 286fc70d740264f55c1c97d8ee8af3692bf766fb..67be8d31afab60f0c543a9517b850af43e0b5191 100644 (file)
@@ -1063,7 +1063,7 @@ static int perf_sample__fprintf_brstackinsn(struct perf_sample *sample,
                        continue;
 
                insn = 0;
-               for (off = 0;; off += ilen) {
+               for (off = 0; off < (unsigned)len; off += ilen) {
                        uint64_t ip = start + off;
 
                        printed += ip__fprintf_sym(ip, thread, x.cpumode, x.cpu, &lastsym, attr, fp);
@@ -1074,6 +1074,7 @@ static int perf_sample__fprintf_brstackinsn(struct perf_sample *sample,
                                        printed += print_srccode(thread, x.cpumode, ip);
                                break;
                        } else {
+                               ilen = 0;
                                printed += fprintf(fp, "\t%016" PRIx64 "\t%s\n", ip,
                                                   dump_insn(&x, ip, buffer + off, len - off, &ilen));
                                if (ilen == 0)
@@ -1083,6 +1084,8 @@ static int perf_sample__fprintf_brstackinsn(struct perf_sample *sample,
                                insn++;
                        }
                }
+               if (off != (unsigned)len)
+                       printed += fprintf(fp, "\tmismatch of LBR data and executable\n");
        }
 
        /*
@@ -1123,6 +1126,7 @@ static int perf_sample__fprintf_brstackinsn(struct perf_sample *sample,
                goto out;
        }
        for (off = 0; off <= end - start; off += ilen) {
+               ilen = 0;
                printed += fprintf(fp, "\t%016" PRIx64 "\t%s\n", start + off,
                                   dump_insn(&x, start + off, buffer + off, len - off, &ilen));
                if (ilen == 0)
index e2e0f06c97d066053ea6487698ca239ef4be57b4..cea13cb987d00c77a0bb33ccbfda62c6b6efd100 100755 (executable)
@@ -8,6 +8,7 @@ include/uapi/drm/i915_drm.h
 include/uapi/linux/fadvise.h
 include/uapi/linux/fcntl.h
 include/uapi/linux/fs.h
+include/uapi/linux/fscrypt.h
 include/uapi/linux/kcmp.h
 include/uapi/linux/kvm.h
 include/uapi/linux/in.h
index 1e148bbdf820f69efcd97fa07ec4befb993b1751..202cadaaf097bdbb0135bca174f7fa92bf208aa9 100644 (file)
@@ -2,7 +2,7 @@ jvmti-y += libjvmti.o
 jvmti-y += jvmti_agent.o
 
 # For strlcpy
-jvmti-y += libstring.o
+jvmti-y += libstring.o libctype.o
 
 CFLAGS_jvmti         = -fPIC -DPIC -I$(JDIR)/include -I$(JDIR)/include/linux
 CFLAGS_REMOVE_jvmti  = -Wmissing-declarations
@@ -15,3 +15,7 @@ CFLAGS_libstring.o += -Wno-unused-parameter -DETC_PERFCONFIG="BUILD_STR($(ETC_PE
 $(OUTPUT)jvmti/libstring.o: ../lib/string.c FORCE
        $(call rule_mkdir)
        $(call if_changed_dep,cc_o_c)
+
+$(OUTPUT)jvmti/libctype.o: ../lib/ctype.c FORCE
+       $(call rule_mkdir)
+       $(call if_changed_dep,cc_o_c)
diff --git a/tools/perf/pmu-events/arch/s390/cf_m8561/basic.json b/tools/perf/pmu-events/arch/s390/cf_m8561/basic.json
deleted file mode 100644 (file)
index 17fb524..0000000
+++ /dev/null
@@ -1,58 +0,0 @@
-[
-       {
-               "Unit": "CPU-M-CF",
-               "EventCode": "0",
-               "EventName": "CPU_CYCLES",
-               "BriefDescription": "CPU Cycles",
-               "PublicDescription": "Cycle Count"
-       },
-       {
-               "Unit": "CPU-M-CF",
-               "EventCode": "1",
-               "EventName": "INSTRUCTIONS",
-               "BriefDescription": "Instructions",
-               "PublicDescription": "Instruction Count"
-       },
-       {
-               "Unit": "CPU-M-CF",
-               "EventCode": "2",
-               "EventName": "L1I_DIR_WRITES",
-               "BriefDescription": "L1I Directory Writes",
-               "PublicDescription": "Level-1 I-Cache Directory Write Count"
-       },
-       {
-               "Unit": "CPU-M-CF",
-               "EventCode": "3",
-               "EventName": "L1I_PENALTY_CYCLES",
-               "BriefDescription": "L1I Penalty Cycles",
-               "PublicDescription": "Level-1 I-Cache Penalty Cycle Count"
-       },
-       {
-               "Unit": "CPU-M-CF",
-               "EventCode": "4",
-               "EventName": "L1D_DIR_WRITES",
-               "BriefDescription": "L1D Directory Writes",
-               "PublicDescription": "Level-1 D-Cache Directory Write Count"
-       },
-       {
-               "Unit": "CPU-M-CF",
-               "EventCode": "5",
-               "EventName": "L1D_PENALTY_CYCLES",
-               "BriefDescription": "L1D Penalty Cycles",
-               "PublicDescription": "Level-1 D-Cache Penalty Cycle Count"
-       },
-       {
-               "Unit": "CPU-M-CF",
-               "EventCode": "32",
-               "EventName": "PROBLEM_STATE_CPU_CYCLES",
-               "BriefDescription": "Problem-State CPU Cycles",
-               "PublicDescription": "Problem-State Cycle Count"
-       },
-       {
-               "Unit": "CPU-M-CF",
-               "EventCode": "33",
-               "EventName": "PROBLEM_STATE_INSTRUCTIONS",
-               "BriefDescription": "Problem-State Instructions",
-               "PublicDescription": "Problem-State Instruction Count"
-       },
-]
diff --git a/tools/perf/pmu-events/arch/s390/cf_m8561/crypto.json b/tools/perf/pmu-events/arch/s390/cf_m8561/crypto.json
deleted file mode 100644 (file)
index db286f1..0000000
+++ /dev/null
@@ -1,114 +0,0 @@
-[
-       {
-               "Unit": "CPU-M-CF",
-               "EventCode": "64",
-               "EventName": "PRNG_FUNCTIONS",
-               "BriefDescription": "PRNG Functions",
-               "PublicDescription": "Total number of the PRNG functions issued by the CPU"
-       },
-       {
-               "Unit": "CPU-M-CF",
-               "EventCode": "65",
-               "EventName": "PRNG_CYCLES",
-               "BriefDescription": "PRNG Cycles",
-               "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing PRNG functions issued by the CPU"
-       },
-       {
-               "Unit": "CPU-M-CF",
-               "EventCode": "66",
-               "EventName": "PRNG_BLOCKED_FUNCTIONS",
-               "BriefDescription": "PRNG Blocked Functions",
-               "PublicDescription": "Total number of the PRNG functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
-       },
-       {
-               "Unit": "CPU-M-CF",
-               "EventCode": "67",
-               "EventName": "PRNG_BLOCKED_CYCLES",
-               "BriefDescription": "PRNG Blocked Cycles",
-               "PublicDescription": "Total number of CPU cycles blocked for the PRNG functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
-       },
-       {
-               "Unit": "CPU-M-CF",
-               "EventCode": "68",
-               "EventName": "SHA_FUNCTIONS",
-               "BriefDescription": "SHA Functions",
-               "PublicDescription": "Total number of SHA functions issued by the CPU"
-       },
-       {
-               "Unit": "CPU-M-CF",
-               "EventCode": "69",
-               "EventName": "SHA_CYCLES",
-               "BriefDescription": "SHA Cycles",
-               "PublicDescription": "Total number of CPU cycles when the SHA coprocessor is busy performing the SHA functions issued by the CPU"
-       },
-       {
-               "Unit": "CPU-M-CF",
-               "EventCode": "70",
-               "EventName": "SHA_BLOCKED_FUNCTIONS",
-               "BriefDescription": "SHA Blocked Functions",
-               "PublicDescription": "Total number of the SHA functions that are issued by the CPU and are blocked because the SHA coprocessor is busy performing a function issued by another CPU"
-       },
-       {
-               "Unit": "CPU-M-CF",
-               "EventCode": "71",
-               "EventName": "SHA_BLOCKED_CYCLES",
-               "BriefDescription": "SHA Bloced Cycles",
-               "PublicDescription": "Total number of CPU cycles blocked for the SHA functions issued by the CPU because the SHA coprocessor is busy performing a function issued by another CPU"
-       },
-       {
-               "Unit": "CPU-M-CF",
-               "EventCode": "72",
-               "EventName": "DEA_FUNCTIONS",
-               "BriefDescription": "DEA Functions",
-               "PublicDescription": "Total number of the DEA functions issued by the CPU"
-       },
-       {
-               "Unit": "CPU-M-CF",
-               "EventCode": "73",
-               "EventName": "DEA_CYCLES",
-               "BriefDescription": "DEA Cycles",
-               "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing the DEA functions issued by the CPU"
-       },
-       {
-               "Unit": "CPU-M-CF",
-               "EventCode": "74",
-               "EventName": "DEA_BLOCKED_FUNCTIONS",
-               "BriefDescription": "DEA Blocked Functions",
-               "PublicDescription": "Total number of the DEA functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
-       },
-       {
-               "Unit": "CPU-M-CF",
-               "EventCode": "75",
-               "EventName": "DEA_BLOCKED_CYCLES",
-               "BriefDescription": "DEA Blocked Cycles",
-               "PublicDescription": "Total number of CPU cycles blocked for the DEA functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
-       },
-       {
-               "Unit": "CPU-M-CF",
-               "EventCode": "76",
-               "EventName": "AES_FUNCTIONS",
-               "BriefDescription": "AES Functions",
-               "PublicDescription": "Total number of AES functions issued by the CPU"
-       },
-       {
-               "Unit": "CPU-M-CF",
-               "EventCode": "77",
-               "EventName": "AES_CYCLES",
-               "BriefDescription": "AES Cycles",
-               "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing the AES functions issued by the CPU"
-       },
-       {
-               "Unit": "CPU-M-CF",
-               "EventCode": "78",
-               "EventName": "AES_BLOCKED_FUNCTIONS",
-               "BriefDescription": "AES Blocked Functions",
-               "PublicDescription": "Total number of AES functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
-       },
-       {
-               "Unit": "CPU-M-CF",
-               "EventCode": "79",
-               "EventName": "AES_BLOCKED_CYCLES",
-               "BriefDescription": "AES Blocked Cycles",
-               "PublicDescription": "Total number of CPU cycles blocked for the AES functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
-       },
-]
diff --git a/tools/perf/pmu-events/arch/s390/cf_m8561/crypto6.json b/tools/perf/pmu-events/arch/s390/cf_m8561/crypto6.json
deleted file mode 100644 (file)
index 5e36bc2..0000000
+++ /dev/null
@@ -1,30 +0,0 @@
-[
-       {
-               "Unit": "CPU-M-CF",
-               "EventCode": "80",
-               "EventName": "ECC_FUNCTION_COUNT",
-               "BriefDescription": "ECC Function Count",
-               "PublicDescription": "Long ECC function Count"
-       },
-       {
-               "Unit": "CPU-M-CF",
-               "EventCode": "81",
-               "EventName": "ECC_CYCLES_COUNT",
-               "BriefDescription": "ECC Cycles Count",
-               "PublicDescription": "Long ECC Function cycles count"
-       },
-       {
-               "Unit": "CPU-M-CF",
-               "EventCode": "82",
-               "EventName": "ECC_BLOCKED_FUNCTION_COUNT",
-               "BriefDescription": "Ecc Blocked Function Count",
-               "PublicDescription": "Long ECC blocked function count"
-       },
-       {
-               "Unit": "CPU-M-CF",
-               "EventCode": "83",
-               "EventName": "ECC_BLOCKED_CYCLES_COUNT",
-               "BriefDescription": "ECC Blocked Cycles Count",
-               "PublicDescription": "Long ECC blocked cycles count"
-       },
-]
diff --git a/tools/perf/pmu-events/arch/s390/cf_m8561/extended.json b/tools/perf/pmu-events/arch/s390/cf_m8561/extended.json
deleted file mode 100644 (file)
index 89e0707..0000000
+++ /dev/null
@@ -1,373 +0,0 @@
-[
-       {
-               "Unit": "CPU-M-CF",
-               "EventCode": "128",
-               "EventName": "L1D_RO_EXCL_WRITES",
-               "BriefDescription": "L1D Read-only Exclusive Writes",
-               "PublicDescription": "A directory write to the Level-1 Data cache where the line was originally in a Read-Only state in the cache but has been updated to be in the Exclusive state that allows stores to the cache line"
-       },
-       {
-               "Unit": "CPU-M-CF",
-               "EventCode": "129",
-               "EventName": "DTLB2_WRITES",
-               "BriefDescription": "DTLB2 Writes",
-               "PublicDescription": "A translation has been written into The Translation Lookaside Buffer 2 (TLB2) and the request was made by the data cache"
-       },
-       {
-               "Unit": "CPU-M-CF",
-               "EventCode": "130",
-               "EventName": "DTLB2_MISSES",
-               "BriefDescription": "DTLB2 Misses",
-               "PublicDescription": "A TLB2 miss is in progress for a request made by the data cache. Incremented by one for every TLB2 miss in progress for the Level-1 Data cache on this cycle"
-       },
-       {
-               "Unit": "CPU-M-CF",
-               "EventCode": "131",
-               "EventName": "DTLB2_HPAGE_WRITES",
-               "BriefDescription": "DTLB2 One-Megabyte Page Writes",
-               "PublicDescription": "A translation entry was written into the Combined Region and Segment Table Entry array in the Level-2 TLB for a one-megabyte page or a Last Host Translation was done"
-       },
-       {
-               "Unit": "CPU-M-CF",
-               "EventCode": "132",
-               "EventName": "DTLB2_GPAGE_WRITES",
-               "BriefDescription": "DTLB2 Two-Gigabyte Page Writes",
-               "PublicDescription": "A translation entry for a two-gigabyte page was written into the Level-2 TLB"
-       },
-       {
-               "Unit": "CPU-M-CF",
-               "EventCode": "133",
-               "EventName": "L1D_L2D_SOURCED_WRITES",
-               "BriefDescription": "L1D L2D Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from the Level-2 Data cache"
-       },
-       {
-               "Unit": "CPU-M-CF",
-               "EventCode": "134",
-               "EventName": "ITLB2_WRITES",
-               "BriefDescription": "ITLB2 Writes",
-               "PublicDescription": "A translation entry has been written into the Translation Lookaside Buffer 2 (TLB2) and the request was made by the instruction cache"
-       },
-       {
-               "Unit": "CPU-M-CF",
-               "EventCode": "135",
-               "EventName": "ITLB2_MISSES",
-               "BriefDescription": "ITLB2 Misses",
-               "PublicDescription": "A TLB2 miss is in progress for a request made by the instruction cache. Incremented by one for every TLB2 miss in progress for the Level-1 Instruction cache in a cycle"
-       },
-       {
-               "Unit": "CPU-M-CF",
-               "EventCode": "136",
-               "EventName": "L1I_L2I_SOURCED_WRITES",
-               "BriefDescription": "L1I L2I Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from the Level-2 Instruction cache"
-       },
-       {
-               "Unit": "CPU-M-CF",
-               "EventCode": "137",
-               "EventName": "TLB2_PTE_WRITES",
-               "BriefDescription": "TLB2 PTE Writes",
-               "PublicDescription": "A translation entry was written into the Page Table Entry array in the Level-2 TLB"
-       },
-       {
-               "Unit": "CPU-M-CF",
-               "EventCode": "138",
-               "EventName": "TLB2_CRSTE_WRITES",
-               "BriefDescription": "TLB2 CRSTE Writes",
-               "PublicDescription": "Translation entries were written into the Combined Region and Segment Table Entry array and the Page Table Entry array in the Level-2 TLB"
-       },
-       {
-               "Unit": "CPU-M-CF",
-               "EventCode": "139",
-               "EventName": "TLB2_ENGINES_BUSY",
-               "BriefDescription": "TLB2 Engines Busy",
-               "PublicDescription": "The number of Level-2 TLB translation engines busy in a cycle"
-       },
-       {
-               "Unit": "CPU-M-CF",
-               "EventCode": "140",
-               "EventName": "TX_C_TEND",
-               "BriefDescription": "Completed TEND instructions in constrained TX mode",
-               "PublicDescription": "A TEND instruction has completed in a constrained transactional-execution mode"
-       },
-       {
-               "Unit": "CPU-M-CF",
-               "EventCode": "141",
-               "EventName": "TX_NC_TEND",
-               "BriefDescription": "Completed TEND instructions in non-constrained TX mode",
-               "PublicDescription": "A TEND instruction has completed in a non-constrained transactional-execution mode"
-       },
-       {
-               "Unit": "CPU-M-CF",
-               "EventCode": "143",
-               "EventName": "L1C_TLB2_MISSES",
-               "BriefDescription": "L1C TLB2 Misses",
-               "PublicDescription": "Increments by one for any cycle where a level-1 cache or level-2 TLB miss is in progress"
-       },
-       {
-               "Unit": "CPU-M-CF",
-               "EventCode": "144",
-               "EventName": "L1D_ONCHIP_L3_SOURCED_WRITES",
-               "BriefDescription": "L1D On-Chip L3 Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Chip Level-3 cache without intervention"
-       },
-       {
-               "Unit": "CPU-M-CF",
-               "EventCode": "145",
-               "EventName": "L1D_ONCHIP_MEMORY_SOURCED_WRITES",
-               "BriefDescription": "L1D On-Chip Memory Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Chip memory"
-       },
-       {
-               "Unit": "CPU-M-CF",
-               "EventCode": "146",
-               "EventName": "L1D_ONCHIP_L3_SOURCED_WRITES_IV",
-               "BriefDescription": "L1D On-Chip L3 Sourced Writes with Intervention",
-               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Chip Level-3 cache with intervention"
-       },
-       {
-               "Unit": "CPU-M-CF",
-               "EventCode": "147",
-               "EventName": "L1D_ONCLUSTER_L3_SOURCED_WRITES",
-               "BriefDescription": "L1D On-Cluster L3 Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Cluster Level-3 cache withountervention"
-       },
-       {
-               "Unit": "CPU-M-CF",
-               "EventCode": "148",
-               "EventName": "L1D_ONCLUSTER_MEMORY_SOURCED_WRITES",
-               "BriefDescription": "L1D On-Cluster Memory Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Cluster memory"
-       },
-       {
-               "Unit": "CPU-M-CF",
-               "EventCode": "149",
-               "EventName": "L1D_ONCLUSTER_L3_SOURCED_WRITES_IV",
-               "BriefDescription": "L1D On-Cluster L3 Sourced Writes with Intervention",
-               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Cluster Level-3 cache with intervention"
-       },
-       {
-               "Unit": "CPU-M-CF",
-               "EventCode": "150",
-               "EventName": "L1D_OFFCLUSTER_L3_SOURCED_WRITES",
-               "BriefDescription": "L1D Off-Cluster L3 Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Cluster Level-3 cache without intervention"
-       },
-       {
-               "Unit": "CPU-M-CF",
-               "EventCode": "151",
-               "EventName": "L1D_OFFCLUSTER_MEMORY_SOURCED_WRITES",
-               "BriefDescription": "L1D Off-Cluster Memory Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from Off-Cluster memory"
-       },
-       {
-               "Unit": "CPU-M-CF",
-               "EventCode": "152",
-               "EventName": "L1D_OFFCLUSTER_L3_SOURCED_WRITES_IV",
-               "BriefDescription": "L1D Off-Cluster L3 Sourced Writes with Intervention",
-               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Cluster Level-3 cache with intervention"
-       },
-       {
-               "Unit": "CPU-M-CF",
-               "EventCode": "153",
-               "EventName": "L1D_OFFDRAWER_L3_SOURCED_WRITES",
-               "BriefDescription": "L1D Off-Drawer L3 Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Level-3 cache without intervention"
-       },
-       {
-               "Unit": "CPU-M-CF",
-               "EventCode": "154",
-               "EventName": "L1D_OFFDRAWER_MEMORY_SOURCED_WRITES",
-               "BriefDescription": "L1D Off-Drawer Memory Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from Off-Drawer memory"
-       },
-       {
-               "Unit": "CPU-M-CF",
-               "EventCode": "155",
-               "EventName": "L1D_OFFDRAWER_L3_SOURCED_WRITES_IV",
-               "BriefDescription": "L1D Off-Drawer L3 Sourced Writes with Intervention",
-               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Level-3 cache with intervention"
-       },
-       {
-               "Unit": "CPU-M-CF",
-               "EventCode": "156",
-               "EventName": "L1D_ONDRAWER_L4_SOURCED_WRITES",
-               "BriefDescription": "L1D On-Drawer L4 Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Drawer Level-4 cache"
-       },
-       {
-               "Unit": "CPU-M-CF",
-               "EventCode": "157",
-               "EventName": "L1D_OFFDRAWER_L4_SOURCED_WRITES",
-               "BriefDescription": "L1D Off-Drawer L4 Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from Off-Drawer Level-4 cache"
-       },
-       {
-               "Unit": "CPU-M-CF",
-               "EventCode": "158",
-               "EventName": "L1D_ONCHIP_L3_SOURCED_WRITES_RO",
-               "BriefDescription": "L1D On-Chip L3 Sourced Writes read-only",
-               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Chip L3 but a read-only invalidate was done to remove other copies of the cache line"
-       },
-       {
-               "Unit": "CPU-M-CF",
-               "EventCode": "162",
-               "EventName": "L1I_ONCHIP_L3_SOURCED_WRITES",
-               "BriefDescription": "L1I On-Chip L3 Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache ine was sourced from an On-Chip Level-3 cache without intervention"
-       },
-       {
-               "Unit": "CPU-M-CF",
-               "EventCode": "163",
-               "EventName": "L1I_ONCHIP_MEMORY_SOURCED_WRITES",
-               "BriefDescription": "L1I On-Chip Memory Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache ine was sourced from On-Chip memory"
-       },
-       {
-               "Unit": "CPU-M-CF",
-               "EventCode": "164",
-               "EventName": "L1I_ONCHIP_L3_SOURCED_WRITES_IV",
-               "BriefDescription": "L1I On-Chip L3 Sourced Writes with Intervention",
-               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache ine was sourced from an On-Chip Level-3 cache with intervention"
-       },
-       {
-               "Unit": "CPU-M-CF",
-               "EventCode": "165",
-               "EventName": "L1I_ONCLUSTER_L3_SOURCED_WRITES",
-               "BriefDescription": "L1I On-Cluster L3 Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Cluster Level-3 cache without intervention"
-       },
-       {
-               "Unit": "CPU-M-CF",
-               "EventCode": "166",
-               "EventName": "L1I_ONCLUSTER_MEMORY_SOURCED_WRITES",
-               "BriefDescription": "L1I On-Cluster Memory Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Cluster memory"
-       },
-       {
-               "Unit": "CPU-M-CF",
-               "EventCode": "167",
-               "EventName": "L1I_ONCLUSTER_L3_SOURCED_WRITES_IV",
-               "BriefDescription": "L1I On-Cluster L3 Sourced Writes with Intervention",
-               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Cluster Level-3 cache with intervention"
-       },
-       {
-               "Unit": "CPU-M-CF",
-               "EventCode": "168",
-               "EventName": "L1I_OFFCLUSTER_L3_SOURCED_WRITES",
-               "BriefDescription": "L1I Off-Cluster L3 Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Cluster Level-3 cache without intervention"
-       },
-       {
-               "Unit": "CPU-M-CF",
-               "EventCode": "169",
-               "EventName": "L1I_OFFCLUSTER_MEMORY_SOURCED_WRITES",
-               "BriefDescription": "L1I Off-Cluster Memory Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from Off-Cluster memory"
-       },
-       {
-               "Unit": "CPU-M-CF",
-               "EventCode": "170",
-               "EventName": "L1I_OFFCLUSTER_L3_SOURCED_WRITES_IV",
-               "BriefDescription": "L1I Off-Cluster L3 Sourced Writes with Intervention",
-               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Cluster Level-3 cache with intervention"
-       },
-       {
-               "Unit": "CPU-M-CF",
-               "EventCode": "171",
-               "EventName": "L1I_OFFDRAWER_L3_SOURCED_WRITES",
-               "BriefDescription": "L1I Off-Drawer L3 Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Level-3 cache without intervention"
-       },
-       {
-               "Unit": "CPU-M-CF",
-               "EventCode": "172",
-               "EventName": "L1I_OFFDRAWER_MEMORY_SOURCED_WRITES",
-               "BriefDescription": "L1I Off-Drawer Memory Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from Off-Drawer memory"
-       },
-       {
-               "Unit": "CPU-M-CF",
-               "EventCode": "173",
-               "EventName": "L1I_OFFDRAWER_L3_SOURCED_WRITES_IV",
-               "BriefDescription": "L1I Off-Drawer L3 Sourced Writes with Intervention",
-               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Level-3 cache with intervention"
-       },
-       {
-               "Unit": "CPU-M-CF",
-               "EventCode": "174",
-               "EventName": "L1I_ONDRAWER_L4_SOURCED_WRITES",
-               "BriefDescription": "L1I On-Drawer L4 Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Drawer Level-4 cache"
-       },
-       {
-               "Unit": "CPU-M-CF",
-               "EventCode": "175",
-               "EventName": "L1I_OFFDRAWER_L4_SOURCED_WRITES",
-               "BriefDescription": "L1I Off-Drawer L4 Sourced Writes",
-               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from Off-Drawer Level-4 cache"
-       },
-       {
-               "Unit": "CPU-M-CF",
-               "EventCode": "224",
-               "EventName": "BCD_DFP_EXECUTION_SLOTS",
-               "BriefDescription": "BCD DFP Execution Slots",
-               "PublicDescription": "Count of floating point execution slots used for finished Binary Coded Decimal to Decimal Floating Point conversions. Instructions: CDZT, CXZT, CZDT, CZXT"
-       },
-       {
-               "Unit": "CPU-M-CF",
-               "EventCode": "225",
-               "EventName": "VX_BCD_EXECUTION_SLOTS",
-               "BriefDescription": "VX BCD Execution Slots",
-               "PublicDescription": "Count of floating point execution slots used for finished vector arithmetic Binary Coded Decimal instructions. Instructions: VAP, VSP, VMPVMSP, VDP, VSDP, VRP, VLIP, VSRP, VPSOPVCP, VTP, VPKZ, VUPKZ, VCVB, VCVBG, VCVDVCVDG"
-       },
-       {
-               "Unit": "CPU-M-CF",
-               "EventCode": "226",
-               "EventName": "DECIMAL_INSTRUCTIONS",
-               "BriefDescription": "Decimal Instructions",
-               "PublicDescription": "Decimal instructions dispatched. Instructions: CVB, CVD, AP, CP, DP, ED, EDMK, MP, SRP, SP, ZAP"
-       },
-       {
-               "Unit": "CPU-M-CF",
-               "EventCode": "232",
-               "EventName": "LAST_HOST_TRANSLATIONS",
-               "BriefDescription": "Last host translation done",
-               "PublicDescription": "Last Host Translation done"
-       },
-       {
-               "Unit": "CPU-M-CF",
-               "EventCode": "243",
-               "EventName": "TX_NC_TABORT",
-               "BriefDescription": "Aborted transactions in non-constrained TX mode",
-               "PublicDescription": "A transaction abort has occurred in a non-constrained transactional-execution mode"
-       },
-       {
-               "Unit": "CPU-M-CF",
-               "EventCode": "244",
-               "EventName": "TX_C_TABORT_NO_SPECIAL",
-               "BriefDescription": "Aborted transactions in constrained TX mode not using special completion logic",
-               "PublicDescription": "A transaction abort has occurred in a constrained transactional-execution mode and the CPU is not using any special logic to allow the transaction to complete"
-       },
-       {
-               "Unit": "CPU-M-CF",
-               "EventCode": "245",
-               "EventName": "TX_C_TABORT_SPECIAL",
-               "BriefDescription": "Aborted transactions in constrained TX mode using special completion logic",
-               "PublicDescription": "A transaction abort has occurred in a constrained transactional-execution mode and the CPU is using special logic to allow the transaction to complete"
-       },
-       {
-               "Unit": "CPU-M-CF",
-               "EventCode": "448",
-               "EventName": "MT_DIAG_CYCLES_ONE_THR_ACTIVE",
-               "BriefDescription": "Cycle count with one thread active",
-               "PublicDescription": "Cycle count with one thread active"
-       },
-       {
-               "Unit": "CPU-M-CF",
-               "EventCode": "449",
-               "EventName": "MT_DIAG_CYCLES_TWO_THR_ACTIVE",
-               "BriefDescription": "Cycle count with two threads active",
-               "PublicDescription": "Cycle count with two threads active"
-       },
-]
diff --git a/tools/perf/pmu-events/arch/s390/cf_z15/basic.json b/tools/perf/pmu-events/arch/s390/cf_z15/basic.json
new file mode 100644 (file)
index 0000000..17fb524
--- /dev/null
@@ -0,0 +1,58 @@
+[
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "0",
+               "EventName": "CPU_CYCLES",
+               "BriefDescription": "CPU Cycles",
+               "PublicDescription": "Cycle Count"
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "1",
+               "EventName": "INSTRUCTIONS",
+               "BriefDescription": "Instructions",
+               "PublicDescription": "Instruction Count"
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "2",
+               "EventName": "L1I_DIR_WRITES",
+               "BriefDescription": "L1I Directory Writes",
+               "PublicDescription": "Level-1 I-Cache Directory Write Count"
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "3",
+               "EventName": "L1I_PENALTY_CYCLES",
+               "BriefDescription": "L1I Penalty Cycles",
+               "PublicDescription": "Level-1 I-Cache Penalty Cycle Count"
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "4",
+               "EventName": "L1D_DIR_WRITES",
+               "BriefDescription": "L1D Directory Writes",
+               "PublicDescription": "Level-1 D-Cache Directory Write Count"
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "5",
+               "EventName": "L1D_PENALTY_CYCLES",
+               "BriefDescription": "L1D Penalty Cycles",
+               "PublicDescription": "Level-1 D-Cache Penalty Cycle Count"
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "32",
+               "EventName": "PROBLEM_STATE_CPU_CYCLES",
+               "BriefDescription": "Problem-State CPU Cycles",
+               "PublicDescription": "Problem-State Cycle Count"
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "33",
+               "EventName": "PROBLEM_STATE_INSTRUCTIONS",
+               "BriefDescription": "Problem-State Instructions",
+               "PublicDescription": "Problem-State Instruction Count"
+       },
+]
diff --git a/tools/perf/pmu-events/arch/s390/cf_z15/crypto.json b/tools/perf/pmu-events/arch/s390/cf_z15/crypto.json
new file mode 100644 (file)
index 0000000..db286f1
--- /dev/null
@@ -0,0 +1,114 @@
+[
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "64",
+               "EventName": "PRNG_FUNCTIONS",
+               "BriefDescription": "PRNG Functions",
+               "PublicDescription": "Total number of the PRNG functions issued by the CPU"
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "65",
+               "EventName": "PRNG_CYCLES",
+               "BriefDescription": "PRNG Cycles",
+               "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing PRNG functions issued by the CPU"
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "66",
+               "EventName": "PRNG_BLOCKED_FUNCTIONS",
+               "BriefDescription": "PRNG Blocked Functions",
+               "PublicDescription": "Total number of the PRNG functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "67",
+               "EventName": "PRNG_BLOCKED_CYCLES",
+               "BriefDescription": "PRNG Blocked Cycles",
+               "PublicDescription": "Total number of CPU cycles blocked for the PRNG functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "68",
+               "EventName": "SHA_FUNCTIONS",
+               "BriefDescription": "SHA Functions",
+               "PublicDescription": "Total number of SHA functions issued by the CPU"
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "69",
+               "EventName": "SHA_CYCLES",
+               "BriefDescription": "SHA Cycles",
+               "PublicDescription": "Total number of CPU cycles when the SHA coprocessor is busy performing the SHA functions issued by the CPU"
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "70",
+               "EventName": "SHA_BLOCKED_FUNCTIONS",
+               "BriefDescription": "SHA Blocked Functions",
+               "PublicDescription": "Total number of the SHA functions that are issued by the CPU and are blocked because the SHA coprocessor is busy performing a function issued by another CPU"
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "71",
+               "EventName": "SHA_BLOCKED_CYCLES",
+               "BriefDescription": "SHA Bloced Cycles",
+               "PublicDescription": "Total number of CPU cycles blocked for the SHA functions issued by the CPU because the SHA coprocessor is busy performing a function issued by another CPU"
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "72",
+               "EventName": "DEA_FUNCTIONS",
+               "BriefDescription": "DEA Functions",
+               "PublicDescription": "Total number of the DEA functions issued by the CPU"
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "73",
+               "EventName": "DEA_CYCLES",
+               "BriefDescription": "DEA Cycles",
+               "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing the DEA functions issued by the CPU"
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "74",
+               "EventName": "DEA_BLOCKED_FUNCTIONS",
+               "BriefDescription": "DEA Blocked Functions",
+               "PublicDescription": "Total number of the DEA functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "75",
+               "EventName": "DEA_BLOCKED_CYCLES",
+               "BriefDescription": "DEA Blocked Cycles",
+               "PublicDescription": "Total number of CPU cycles blocked for the DEA functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "76",
+               "EventName": "AES_FUNCTIONS",
+               "BriefDescription": "AES Functions",
+               "PublicDescription": "Total number of AES functions issued by the CPU"
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "77",
+               "EventName": "AES_CYCLES",
+               "BriefDescription": "AES Cycles",
+               "PublicDescription": "Total number of CPU cycles when the DEA/AES coprocessor is busy performing the AES functions issued by the CPU"
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "78",
+               "EventName": "AES_BLOCKED_FUNCTIONS",
+               "BriefDescription": "AES Blocked Functions",
+               "PublicDescription": "Total number of AES functions that are issued by the CPU and are blocked because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "79",
+               "EventName": "AES_BLOCKED_CYCLES",
+               "BriefDescription": "AES Blocked Cycles",
+               "PublicDescription": "Total number of CPU cycles blocked for the AES functions issued by the CPU because the DEA/AES coprocessor is busy performing a function issued by another CPU"
+       },
+]
diff --git a/tools/perf/pmu-events/arch/s390/cf_z15/crypto6.json b/tools/perf/pmu-events/arch/s390/cf_z15/crypto6.json
new file mode 100644 (file)
index 0000000..5e36bc2
--- /dev/null
@@ -0,0 +1,30 @@
+[
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "80",
+               "EventName": "ECC_FUNCTION_COUNT",
+               "BriefDescription": "ECC Function Count",
+               "PublicDescription": "Long ECC function Count"
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "81",
+               "EventName": "ECC_CYCLES_COUNT",
+               "BriefDescription": "ECC Cycles Count",
+               "PublicDescription": "Long ECC Function cycles count"
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "82",
+               "EventName": "ECC_BLOCKED_FUNCTION_COUNT",
+               "BriefDescription": "Ecc Blocked Function Count",
+               "PublicDescription": "Long ECC blocked function count"
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "83",
+               "EventName": "ECC_BLOCKED_CYCLES_COUNT",
+               "BriefDescription": "ECC Blocked Cycles Count",
+               "PublicDescription": "Long ECC blocked cycles count"
+       },
+]
diff --git a/tools/perf/pmu-events/arch/s390/cf_z15/extended.json b/tools/perf/pmu-events/arch/s390/cf_z15/extended.json
new file mode 100644 (file)
index 0000000..89e0707
--- /dev/null
@@ -0,0 +1,373 @@
+[
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "128",
+               "EventName": "L1D_RO_EXCL_WRITES",
+               "BriefDescription": "L1D Read-only Exclusive Writes",
+               "PublicDescription": "A directory write to the Level-1 Data cache where the line was originally in a Read-Only state in the cache but has been updated to be in the Exclusive state that allows stores to the cache line"
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "129",
+               "EventName": "DTLB2_WRITES",
+               "BriefDescription": "DTLB2 Writes",
+               "PublicDescription": "A translation has been written into the Translation Lookaside Buffer 2 (TLB2) and the request was made by the data cache"
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "130",
+               "EventName": "DTLB2_MISSES",
+               "BriefDescription": "DTLB2 Misses",
+               "PublicDescription": "A TLB2 miss is in progress for a request made by the data cache. Incremented by one for every TLB2 miss in progress for the Level-1 Data cache on this cycle"
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "131",
+               "EventName": "DTLB2_HPAGE_WRITES",
+               "BriefDescription": "DTLB2 One-Megabyte Page Writes",
+               "PublicDescription": "A translation entry was written into the Combined Region and Segment Table Entry array in the Level-2 TLB for a one-megabyte page or a Last Host Translation was done"
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "132",
+               "EventName": "DTLB2_GPAGE_WRITES",
+               "BriefDescription": "DTLB2 Two-Gigabyte Page Writes",
+               "PublicDescription": "A translation entry for a two-gigabyte page was written into the Level-2 TLB"
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "133",
+               "EventName": "L1D_L2D_SOURCED_WRITES",
+               "BriefDescription": "L1D L2D Sourced Writes",
+               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from the Level-2 Data cache"
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "134",
+               "EventName": "ITLB2_WRITES",
+               "BriefDescription": "ITLB2 Writes",
+               "PublicDescription": "A translation entry has been written into the Translation Lookaside Buffer 2 (TLB2) and the request was made by the instruction cache"
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "135",
+               "EventName": "ITLB2_MISSES",
+               "BriefDescription": "ITLB2 Misses",
+               "PublicDescription": "A TLB2 miss is in progress for a request made by the instruction cache. Incremented by one for every TLB2 miss in progress for the Level-1 Instruction cache in a cycle"
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "136",
+               "EventName": "L1I_L2I_SOURCED_WRITES",
+               "BriefDescription": "L1I L2I Sourced Writes",
+               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from the Level-2 Instruction cache"
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "137",
+               "EventName": "TLB2_PTE_WRITES",
+               "BriefDescription": "TLB2 PTE Writes",
+               "PublicDescription": "A translation entry was written into the Page Table Entry array in the Level-2 TLB"
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "138",
+               "EventName": "TLB2_CRSTE_WRITES",
+               "BriefDescription": "TLB2 CRSTE Writes",
+               "PublicDescription": "Translation entries were written into the Combined Region and Segment Table Entry array and the Page Table Entry array in the Level-2 TLB"
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "139",
+               "EventName": "TLB2_ENGINES_BUSY",
+               "BriefDescription": "TLB2 Engines Busy",
+               "PublicDescription": "The number of Level-2 TLB translation engines busy in a cycle"
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "140",
+               "EventName": "TX_C_TEND",
+               "BriefDescription": "Completed TEND instructions in constrained TX mode",
+               "PublicDescription": "A TEND instruction has completed in a constrained transactional-execution mode"
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "141",
+               "EventName": "TX_NC_TEND",
+               "BriefDescription": "Completed TEND instructions in non-constrained TX mode",
+               "PublicDescription": "A TEND instruction has completed in a non-constrained transactional-execution mode"
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "143",
+               "EventName": "L1C_TLB2_MISSES",
+               "BriefDescription": "L1C TLB2 Misses",
+               "PublicDescription": "Increments by one for any cycle where a level-1 cache or level-2 TLB miss is in progress"
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "144",
+               "EventName": "L1D_ONCHIP_L3_SOURCED_WRITES",
+               "BriefDescription": "L1D On-Chip L3 Sourced Writes",
+               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Chip Level-3 cache without intervention"
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "145",
+               "EventName": "L1D_ONCHIP_MEMORY_SOURCED_WRITES",
+               "BriefDescription": "L1D On-Chip Memory Sourced Writes",
+               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Chip memory"
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "146",
+               "EventName": "L1D_ONCHIP_L3_SOURCED_WRITES_IV",
+               "BriefDescription": "L1D On-Chip L3 Sourced Writes with Intervention",
+               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Chip Level-3 cache with intervention"
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "147",
+               "EventName": "L1D_ONCLUSTER_L3_SOURCED_WRITES",
+               "BriefDescription": "L1D On-Cluster L3 Sourced Writes",
+               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Cluster Level-3 cache without intervention"
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "148",
+               "EventName": "L1D_ONCLUSTER_MEMORY_SOURCED_WRITES",
+               "BriefDescription": "L1D On-Cluster Memory Sourced Writes",
+               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Cluster memory"
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "149",
+               "EventName": "L1D_ONCLUSTER_L3_SOURCED_WRITES_IV",
+               "BriefDescription": "L1D On-Cluster L3 Sourced Writes with Intervention",
+               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an On-Cluster Level-3 cache with intervention"
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "150",
+               "EventName": "L1D_OFFCLUSTER_L3_SOURCED_WRITES",
+               "BriefDescription": "L1D Off-Cluster L3 Sourced Writes",
+               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Cluster Level-3 cache without intervention"
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "151",
+               "EventName": "L1D_OFFCLUSTER_MEMORY_SOURCED_WRITES",
+               "BriefDescription": "L1D Off-Cluster Memory Sourced Writes",
+               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from Off-Cluster memory"
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "152",
+               "EventName": "L1D_OFFCLUSTER_L3_SOURCED_WRITES_IV",
+               "BriefDescription": "L1D Off-Cluster L3 Sourced Writes with Intervention",
+               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Cluster Level-3 cache with intervention"
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "153",
+               "EventName": "L1D_OFFDRAWER_L3_SOURCED_WRITES",
+               "BriefDescription": "L1D Off-Drawer L3 Sourced Writes",
+               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Level-3 cache without intervention"
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "154",
+               "EventName": "L1D_OFFDRAWER_MEMORY_SOURCED_WRITES",
+               "BriefDescription": "L1D Off-Drawer Memory Sourced Writes",
+               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from Off-Drawer memory"
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "155",
+               "EventName": "L1D_OFFDRAWER_L3_SOURCED_WRITES_IV",
+               "BriefDescription": "L1D Off-Drawer L3 Sourced Writes with Intervention",
+               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from an Off-Drawer Level-3 cache with intervention"
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "156",
+               "EventName": "L1D_ONDRAWER_L4_SOURCED_WRITES",
+               "BriefDescription": "L1D On-Drawer L4 Sourced Writes",
+               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Drawer Level-4 cache"
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "157",
+               "EventName": "L1D_OFFDRAWER_L4_SOURCED_WRITES",
+               "BriefDescription": "L1D Off-Drawer L4 Sourced Writes",
+               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from Off-Drawer Level-4 cache"
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "158",
+               "EventName": "L1D_ONCHIP_L3_SOURCED_WRITES_RO",
+               "BriefDescription": "L1D On-Chip L3 Sourced Writes read-only",
+               "PublicDescription": "A directory write to the Level-1 Data cache directory where the returned cache line was sourced from On-Chip L3 but a read-only invalidate was done to remove other copies of the cache line"
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "162",
+               "EventName": "L1I_ONCHIP_L3_SOURCED_WRITES",
+               "BriefDescription": "L1I On-Chip L3 Sourced Writes",
+               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Chip Level-3 cache without intervention"
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "163",
+               "EventName": "L1I_ONCHIP_MEMORY_SOURCED_WRITES",
+               "BriefDescription": "L1I On-Chip Memory Sourced Writes",
+               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Chip memory"
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "164",
+               "EventName": "L1I_ONCHIP_L3_SOURCED_WRITES_IV",
+               "BriefDescription": "L1I On-Chip L3 Sourced Writes with Intervention",
+               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Chip Level-3 cache with intervention"
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "165",
+               "EventName": "L1I_ONCLUSTER_L3_SOURCED_WRITES",
+               "BriefDescription": "L1I On-Cluster L3 Sourced Writes",
+               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Cluster Level-3 cache without intervention"
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "166",
+               "EventName": "L1I_ONCLUSTER_MEMORY_SOURCED_WRITES",
+               "BriefDescription": "L1I On-Cluster Memory Sourced Writes",
+               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Cluster memory"
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "167",
+               "EventName": "L1I_ONCLUSTER_L3_SOURCED_WRITES_IV",
+               "BriefDescription": "L1I On-Cluster L3 Sourced Writes with Intervention",
+               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an On-Cluster Level-3 cache with intervention"
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "168",
+               "EventName": "L1I_OFFCLUSTER_L3_SOURCED_WRITES",
+               "BriefDescription": "L1I Off-Cluster L3 Sourced Writes",
+               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Cluster Level-3 cache without intervention"
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "169",
+               "EventName": "L1I_OFFCLUSTER_MEMORY_SOURCED_WRITES",
+               "BriefDescription": "L1I Off-Cluster Memory Sourced Writes",
+               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from Off-Cluster memory"
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "170",
+               "EventName": "L1I_OFFCLUSTER_L3_SOURCED_WRITES_IV",
+               "BriefDescription": "L1I Off-Cluster L3 Sourced Writes with Intervention",
+               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Cluster Level-3 cache with intervention"
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "171",
+               "EventName": "L1I_OFFDRAWER_L3_SOURCED_WRITES",
+               "BriefDescription": "L1I Off-Drawer L3 Sourced Writes",
+               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Level-3 cache without intervention"
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "172",
+               "EventName": "L1I_OFFDRAWER_MEMORY_SOURCED_WRITES",
+               "BriefDescription": "L1I Off-Drawer Memory Sourced Writes",
+               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from Off-Drawer memory"
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "173",
+               "EventName": "L1I_OFFDRAWER_L3_SOURCED_WRITES_IV",
+               "BriefDescription": "L1I Off-Drawer L3 Sourced Writes with Intervention",
+               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from an Off-Drawer Level-3 cache with intervention"
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "174",
+               "EventName": "L1I_ONDRAWER_L4_SOURCED_WRITES",
+               "BriefDescription": "L1I On-Drawer L4 Sourced Writes",
+               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from On-Drawer Level-4 cache"
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "175",
+               "EventName": "L1I_OFFDRAWER_L4_SOURCED_WRITES",
+               "BriefDescription": "L1I Off-Drawer L4 Sourced Writes",
+               "PublicDescription": "A directory write to the Level-1 Instruction cache directory where the returned cache line was sourced from Off-Drawer Level-4 cache"
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "224",
+               "EventName": "BCD_DFP_EXECUTION_SLOTS",
+               "BriefDescription": "BCD DFP Execution Slots",
+               "PublicDescription": "Count of floating point execution slots used for finished Binary Coded Decimal to Decimal Floating Point conversions. Instructions: CDZT, CXZT, CZDT, CZXT"
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "225",
+               "EventName": "VX_BCD_EXECUTION_SLOTS",
+               "BriefDescription": "VX BCD Execution Slots",
+               "PublicDescription": "Count of floating point execution slots used for finished vector arithmetic Binary Coded Decimal instructions. Instructions: VAP, VSP, VMP, VMSP, VDP, VSDP, VRP, VLIP, VSRP, VPSOP, VCP, VTP, VPKZ, VUPKZ, VCVB, VCVBG, VCVD, VCVDG"
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "226",
+               "EventName": "DECIMAL_INSTRUCTIONS",
+               "BriefDescription": "Decimal Instructions",
+               "PublicDescription": "Decimal instructions dispatched. Instructions: CVB, CVD, AP, CP, DP, ED, EDMK, MP, SRP, SP, ZAP"
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "232",
+               "EventName": "LAST_HOST_TRANSLATIONS",
+               "BriefDescription": "Last host translation done",
+               "PublicDescription": "Last Host Translation done"
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "243",
+               "EventName": "TX_NC_TABORT",
+               "BriefDescription": "Aborted transactions in non-constrained TX mode",
+               "PublicDescription": "A transaction abort has occurred in a non-constrained transactional-execution mode"
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "244",
+               "EventName": "TX_C_TABORT_NO_SPECIAL",
+               "BriefDescription": "Aborted transactions in constrained TX mode not using special completion logic",
+               "PublicDescription": "A transaction abort has occurred in a constrained transactional-execution mode and the CPU is not using any special logic to allow the transaction to complete"
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "245",
+               "EventName": "TX_C_TABORT_SPECIAL",
+               "BriefDescription": "Aborted transactions in constrained TX mode using special completion logic",
+               "PublicDescription": "A transaction abort has occurred in a constrained transactional-execution mode and the CPU is using special logic to allow the transaction to complete"
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "448",
+               "EventName": "MT_DIAG_CYCLES_ONE_THR_ACTIVE",
+               "BriefDescription": "Cycle count with one thread active",
+               "PublicDescription": "Cycle count with one thread active"
+       },
+       {
+               "Unit": "CPU-M-CF",
+               "EventCode": "449",
+               "EventName": "MT_DIAG_CYCLES_TWO_THR_ACTIVE",
+               "BriefDescription": "Cycle count with two threads active",
+               "PublicDescription": "Cycle count with two threads active"
+       },
+]
diff --git a/tools/perf/pmu-events/arch/s390/cf_z15/transaction.json b/tools/perf/pmu-events/arch/s390/cf_z15/transaction.json
new file mode 100644 (file)
index 0000000..1a0034f
--- /dev/null
@@ -0,0 +1,7 @@
+[
+  {
+    "BriefDescription": "Transaction count",
+    "MetricName": "transaction",
+    "MetricExpr": "TX_C_TEND + TX_NC_TEND + TX_NC_TABORT + TX_C_TABORT_SPECIAL + TX_C_TABORT_NO_SPECIAL"
+  }
+]
index bd3fc577139cc76b527d10aed9d3d2e5c9b2a325..61641a3480e03cca04ed33d53ee2fd1f52fe5fd8 100644 (file)
@@ -4,4 +4,4 @@ Family-model,Version,Filename,EventType
 ^IBM.282[78].*[13]\.[1-5].[[:xdigit:]]+$,1,cf_zec12,core
 ^IBM.296[45].*[13]\.[1-5].[[:xdigit:]]+$,1,cf_z13,core
 ^IBM.390[67].*[13]\.[1-5].[[:xdigit:]]+$,3,cf_z14,core
-^IBM.856[12].*3\.6.[[:xdigit:]]+$,3,cf_m8561,core
+^IBM.856[12].*3\.6.[[:xdigit:]]+$,3,cf_z15,core
index 9e37287da9248f1b0e7de3c1996ecaaa624e58c1..e2837260ca4df923e9fbb276a24e5f5e30d3d13a 100644 (file)
@@ -450,12 +450,12 @@ static struct fixed {
        const char *name;
        const char *event;
 } fixed[] = {
-       { "inst_retired.any", "event=0xc0" },
-       { "inst_retired.any_p", "event=0xc0" },
-       { "cpu_clk_unhalted.ref", "event=0x0,umask=0x03" },
-       { "cpu_clk_unhalted.thread", "event=0x3c" },
-       { "cpu_clk_unhalted.core", "event=0x3c" },
-       { "cpu_clk_unhalted.thread_any", "event=0x3c,any=1" },
+       { "inst_retired.any", "event=0xc0,period=2000003" },
+       { "inst_retired.any_p", "event=0xc0,period=2000003" },
+       { "cpu_clk_unhalted.ref", "event=0x0,umask=0x03,period=2000003" },
+       { "cpu_clk_unhalted.thread", "event=0x3c,period=2000003" },
+       { "cpu_clk_unhalted.core", "event=0x3c,period=2000003" },
+       { "cpu_clk_unhalted.thread_any", "event=0x3c,any=1,period=2000003" },
        { NULL, NULL},
 };
 
index dbc27199c65e7ae6e6238ccdffbfdb48e0b17986..dd865e0bea12d4e5e6dbd86b60883dca7318de35 100644 (file)
@@ -19,12 +19,11 @@ static void sigsegv_handler(int sig __maybe_unused)
 static void the_hook(void *_hook_flags)
 {
        int *hook_flags = _hook_flags;
-       int *p = NULL;
 
        *hook_flags = 1234;
 
        /* Generate a segfault, test perf_hooks__recover */
-       *p = 0;
+       raise(SIGSEGV);
 }
 
 int test__perf_hooks(struct test *test __maybe_unused, int subtest __maybe_unused)
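Replacing the NULL-pointer store with raise(SIGSEGV) makes the fault deterministic: writing through a null pointer is undefined behaviour that the compiler is free to reorder or elide, whereas raise() always delivers the signal. Below is a minimal, hedged stand-alone sketch (not perf code) of the raise-and-recover pattern this test exercises, with sigsetjmp()/siglongjmp() standing in for perf_hooks__recover():

    #include <setjmp.h>
    #include <signal.h>
    #include <stdio.h>

    static sigjmp_buf env;

    static void sigsegv_handler(int sig)
    {
            (void)sig;
            siglongjmp(env, 1);             /* stand-in for perf_hooks__recover() */
    }

    int main(void)
    {
            signal(SIGSEGV, sigsegv_handler);
            if (sigsetjmp(env, 1) == 0) {
                    raise(SIGSEGV);         /* deterministic, unlike *(int *)NULL = 0 */
                    return 1;               /* never reached */
            }
            puts("recovered from SIGSEGV");
            return 0;
    }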
index e830eadfca2ab0ef98f903aa1119574128abb1a8..e42bf572358c7edd584ad43309f0e02a336dfebd 100644 (file)
@@ -1631,6 +1631,19 @@ int symbol__strerror_disassemble(struct symbol *sym __maybe_unused, struct map *
        case SYMBOL_ANNOTATE_ERRNO__NO_LIBOPCODES_FOR_BPF:
                scnprintf(buf, buflen, "Please link with binutils's libopcode to enable BPF annotation");
                break;
+       case SYMBOL_ANNOTATE_ERRNO__ARCH_INIT_REGEXP:
+               scnprintf(buf, buflen, "Problems with arch specific instruction name regular expressions.");
+               break;
+       case SYMBOL_ANNOTATE_ERRNO__ARCH_INIT_CPUID_PARSING:
+               scnprintf(buf, buflen, "Problems while parsing the CPUID in the arch specific initialization.");
+               break;
+       case SYMBOL_ANNOTATE_ERRNO__BPF_INVALID_FILE:
+               scnprintf(buf, buflen, "Invalid BPF file: %s.", dso->long_name);
+               break;
+       case SYMBOL_ANNOTATE_ERRNO__BPF_MISSING_BTF:
+               scnprintf(buf, buflen, "The %s BPF file has no BTF section, compile with -g or use pahole -J.",
+                         dso->long_name);
+               break;
        default:
                scnprintf(buf, buflen, "Internal error: Invalid %d error code\n", errnum);
                break;
@@ -1662,7 +1675,7 @@ static int dso__disassemble_filename(struct dso *dso, char *filename, size_t fil
 
        build_id_path = strdup(filename);
        if (!build_id_path)
-               return -1;
+               return ENOMEM;
 
        /*
         * old style build-id cache has name of XX/XXXXXXX.. while
@@ -1713,13 +1726,13 @@ static int symbol__disassemble_bpf(struct symbol *sym,
        char tpath[PATH_MAX];
        size_t buf_size;
        int nr_skip = 0;
-       int ret = -1;
        char *buf;
        bfd *bfdf;
+       int ret;
        FILE *s;
 
        if (dso->binary_type != DSO_BINARY_TYPE__BPF_PROG_INFO)
-               return -1;
+               return SYMBOL_ANNOTATE_ERRNO__BPF_INVALID_FILE;
 
        pr_debug("%s: handling sym %s addr %" PRIx64 " len %" PRIx64 "\n", __func__,
                  sym->name, sym->start, sym->end - sym->start);
@@ -1732,8 +1745,10 @@ static int symbol__disassemble_bpf(struct symbol *sym,
        assert(bfd_check_format(bfdf, bfd_object));
 
        s = open_memstream(&buf, &buf_size);
-       if (!s)
+       if (!s) {
+               ret = errno;
                goto out;
+       }
        init_disassemble_info(&info, s,
                              (fprintf_ftype) fprintf);
 
@@ -1742,8 +1757,10 @@ static int symbol__disassemble_bpf(struct symbol *sym,
 
        info_node = perf_env__find_bpf_prog_info(dso->bpf_prog.env,
                                                 dso->bpf_prog.id);
-       if (!info_node)
+       if (!info_node) {
+               ret = SYMBOL_ANNOTATE_ERRNO__BPF_MISSING_BTF;
                goto out;
+       }
        info_linear = info_node->info_linear;
        sub_id = dso->bpf_prog.sub_id;
 
@@ -2071,11 +2088,11 @@ int symbol__annotate(struct symbol *sym, struct map *map,
        int err;
 
        if (!arch_name)
-               return -1;
+               return errno;
 
        args.arch = arch = arch__find(arch_name);
        if (arch == NULL)
-               return -ENOTSUP;
+               return ENOTSUP;
 
        if (parch)
                *parch = arch;
@@ -2971,7 +2988,7 @@ int symbol__annotate2(struct symbol *sym, struct map *map, struct evsel *evsel,
 
        notes->offsets = zalloc(size * sizeof(struct annotation_line *));
        if (notes->offsets == NULL)
-               return -1;
+               return ENOMEM;
 
        if (perf_evsel__is_group_event(evsel))
                nr_pcnt = evsel->core.nr_members;
@@ -2997,7 +3014,7 @@ int symbol__annotate2(struct symbol *sym, struct map *map, struct evsel *evsel,
 
 out_free_offsets:
        zfree(&notes->offsets);
-       return -1;
+       return err;
 }
 
 #define ANNOTATION__CFG(n) \
index d94be9140e314d6e66ef083498e0395f772cdd3f..d76fd0e81f4658ed2b1f3e23874d031b094bdaf6 100644 (file)
@@ -370,6 +370,10 @@ enum symbol_disassemble_errno {
 
        SYMBOL_ANNOTATE_ERRNO__NO_VMLINUX       = __SYMBOL_ANNOTATE_ERRNO__START,
        SYMBOL_ANNOTATE_ERRNO__NO_LIBOPCODES_FOR_BPF,
+       SYMBOL_ANNOTATE_ERRNO__ARCH_INIT_CPUID_PARSING,
+       SYMBOL_ANNOTATE_ERRNO__ARCH_INIT_REGEXP,
+       SYMBOL_ANNOTATE_ERRNO__BPF_INVALID_FILE,
+       SYMBOL_ANNOTATE_ERRNO__BPF_MISSING_BTF,
 
        __SYMBOL_ANNOTATE_ERRNO__END,
 };
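With symbol__annotate() and its helpers now returning positive errno values or the SYMBOL_ANNOTATE_ERRNO__* codes added above instead of a bare -1, a caller can surface the exact failure to the user. A hedged sketch of such a caller (the options argument, the ui__error() reporting and the surrounding variables are assumptions, not part of this patch):

    int err = symbol__annotate2(sym, map, evsel, &annotation__default_options, &arch);

    if (err) {
            char msg[BUFSIZ];

            symbol__strerror_disassemble(sym, map, err, msg, sizeof(msg));
            ui__error("Couldn't annotate %s: %s\n", sym->name, msg);
    }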
index 3fa0db1366676aa2ac047d6a5674e04c453837ba..47e03de7c2358a0d2c7150190a5b811b4ef7b133 100644 (file)
@@ -101,14 +101,16 @@ static int copyfile_mode_ns(const char *from, const char *to, mode_t mode,
        if (tofd < 0)
                goto out;
 
-       if (fchmod(tofd, mode))
-               goto out_close_to;
-
        if (st.st_size == 0) { /* /proc? do it slowly... */
                err = slow_copyfile(from, tmp, nsi);
+               if (!err && fchmod(tofd, mode))
+                       err = -1;
                goto out_close_to;
        }
 
+       if (fchmod(tofd, mode))
+               goto out_close_to;
+
        nsinfo__mountns_enter(nsi, &nsc);
        fromfd = open(from, O_RDONLY);
        nsinfo__mountns_exit(&nsc);
index d277a98e62df8f769dbf88e45b6b635b085050e6..de79c735e4411b182ce9da0d4d8baee120eaed15 100644 (file)
@@ -1659,7 +1659,7 @@ struct evsel *perf_evlist__reset_weak_group(struct evlist *evsel_list,
                        is_open = false;
                if (c2->leader == leader) {
                        if (is_open)
-                               perf_evsel__close(&evsel->core);
+                               perf_evsel__close(&c2->core);
                        c2->leader = c2;
                        c2->core.nr_members = 0;
                }
index 5591af81a070653f27ba6b23d8687128155c1396..abc7fda4a0fe18db91e75b10d527e90107352038 100644 (file)
@@ -30,6 +30,7 @@
 #include "counts.h"
 #include "event.h"
 #include "evsel.h"
+#include "util/env.h"
 #include "util/evsel_config.h"
 #include "util/evsel_fprintf.h"
 #include "evlist.h"
@@ -2512,7 +2513,7 @@ struct perf_env *perf_evsel__env(struct evsel *evsel)
 {
        if (evsel && evsel->evlist)
                return evsel->evlist->env;
-       return NULL;
+       return &perf_env;
 }
 
 static int store_evsel_ids(struct evsel *evsel, struct evlist *evlist)
index 86d9396cb131cdca6731da256f3a10460827b6cf..becc2d1094237580901dfe0578ad63900cc77635 100644 (file)
@@ -1296,8 +1296,10 @@ static int build_mem_topology(struct memory_node *nodes, u64 size, u64 *cntp)
                        continue;
 
                if (WARN_ONCE(cnt >= size,
-                             "failed to write MEM_TOPOLOGY, way too many nodes\n"))
+                       "failed to write MEM_TOPOLOGY, way too many nodes\n")) {
+                       closedir(dir);
                        return -1;
+               }
 
                ret = memory_node__read(&nodes[cnt++], idx);
        }
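The hunk above adds the closedir() that was missing on the WARN_ONCE early-return path, so the directory stream (and its file descriptor) is no longer leaked. A minimal, hedged illustration of the rule being enforced (count_entries() and its limit are hypothetical, not perf code):

    #include <dirent.h>

    /* Every early return taken after opendir() succeeds must also closedir(). */
    static int count_entries(const char *path, int max)
    {
            struct dirent *ent;
            DIR *dir = opendir(path);
            int cnt = 0;

            if (!dir)
                    return -1;
            while ((ent = readdir(dir)) != NULL) {
                    if (++cnt > max) {
                            closedir(dir);  /* the cleanup this fix adds */
                            return -1;
                    }
            }
            closedir(dir);
            return cnt;
    }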
index 1bdf4c6ea3e5050398a526447019364dd6f5c16f..e3ccb0ce1938d30e0a3cdc42a75bf96ac1f7db28 100644 (file)
@@ -395,7 +395,7 @@ static int jit_repipe_code_load(struct jit_buf_desc *jd, union jr_entry *jr)
        size_t size;
        u16 idr_size;
        const char *sym;
-       uint32_t count;
+       uint64_t count;
        int ret, csize, usize;
        pid_t pid, tid;
        struct {
@@ -418,7 +418,7 @@ static int jit_repipe_code_load(struct jit_buf_desc *jd, union jr_entry *jr)
                return -1;
 
        filename = event->mmap2.filename;
-       size = snprintf(filename, PATH_MAX, "%s/jitted-%d-%u.so",
+       size = snprintf(filename, PATH_MAX, "%s/jitted-%d-%" PRIu64 ".so",
                        jd->dir,
                        pid,
                        count);
@@ -529,7 +529,7 @@ static int jit_repipe_code_move(struct jit_buf_desc *jd, union jr_entry *jr)
                return -1;
 
        filename = event->mmap2.filename;
-       size = snprintf(filename, PATH_MAX, "%s/jitted-%d-%"PRIu64,
+       size = snprintf(filename, PATH_MAX, "%s/jitted-%d-%" PRIu64 ".so",
                 jd->dir,
                 pid,
                 jr->move.code_index);
index 8d04e3d070b12b95647dc07bd9ade05b3b32b24a..8b14e4a7f1dc7b779f83c09046238276f9bde387 100644 (file)
@@ -233,14 +233,14 @@ static int detect_kbuild_dir(char **kbuild_dir)
        const char *prefix_dir = "";
        const char *suffix_dir = "";
 
+       /* _UTSNAME_LENGTH is 65 */
+       char release[128];
+
        char *autoconf_path;
 
        int err;
 
        if (!test_dir) {
-               /* _UTSNAME_LENGTH is 65 */
-               char release[128];
-
                err = fetch_kernel_version(NULL, release,
                                           sizeof(release));
                if (err)
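Hoisting release[] to function scope is a lifetime fix: later code in detect_kbuild_dir() (outside the shown hunk) presumably keeps using the kernel release string through a pointer assigned inside the if (!test_dir) block, and a block-scoped array is dead once that block closes. A minimal, hedged illustration of this class of bug (the path and version string are made up):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            const char *dir;

            {
                    char release[128];

                    strcpy(release, "5.4.0-rc6");
                    dir = release;          /* pointer escapes the block */
            }
            /* Undefined behaviour: release's lifetime ended with its block. */
            printf("/lib/modules/%s/build\n", dir);
            return 0;
    }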
index 5b83ed1ebbd6bae6c905ab23c6781c84d2d342ea..eec9b282c04790fb4115c3435822547ce04f84be 100644 (file)
@@ -1,5 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 #include "symbol.h"
+#include <assert.h>
 #include <errno.h>
 #include <inttypes.h>
 #include <limits.h>
@@ -850,6 +851,8 @@ static int maps__fixup_overlappings(struct maps *maps, struct map *map, FILE *fp
                        }
 
                        after->start = map->end;
+                       after->pgoff += map->end - pos->start;
+                       assert(pos->map_ip(pos, map->end) == after->map_ip(after, map->end));
                        __map_groups__insert(pos->groups, after);
                        if (verbose >= 2 && !use_browser)
                                map__fprintf(after, fp);
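When maps__fixup_overlappings() keeps the tail of an overlapped map, moving the start forward alone is not enough: the tail must also skip the same number of bytes in the backing object, which is what the new pgoff adjustment does, and the added assert encodes that invariant. Worked with illustrative numbers: if pos covered [0x1000, 0x5000) with pgoff 0x0 and the overlapping map ends at 0x3000, the surviving tail gets start = 0x3000 and pgoff = 0x0 + (0x3000 - 0x1000) = 0x2000, so pos->map_ip(pos, 0x3000) and after->map_ip(after, 0x3000) both resolve to the same dso-relative address (0x2000 under the usual ip - start + pgoff translation).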
index 53f31053a27a8ef97c61ff0d55074669f53d0848..02460362256d17f5833320cbb6db5ebe0768907e 100644 (file)
@@ -14,6 +14,7 @@
 #include "thread_map.h"
 #include "trace-event.h"
 #include "mmap.h"
+#include "util/env.h"
 #include <internal/lib.h>
 #include "../perf-sys.h"
 
@@ -53,6 +54,11 @@ int parse_callchain_record(const char *arg __maybe_unused,
        return 0;
 }
 
+/*
+ * Add this stub here to avoid dragging in util/env.c
+ */
+struct perf_env perf_env;
+
 /*
  * Support debug printing even though util/debug.c is not linked.  That means
  * implementing 'verbose' and 'eprintf'.
index 5eda6e19c94745e4df80a0df5c25b91919f47d2e..ae56c766eda16e0d51560fcea4532843d2c96bb8 100644 (file)
@@ -154,8 +154,10 @@ static int rm_rf_depth_pat(const char *path, int depth, const char **pat)
                if (!strcmp(d->d_name, ".") || !strcmp(d->d_name, ".."))
                        continue;
 
-               if (!match_pat(d->d_name, pat))
-                       return -2;
+               if (!match_pat(d->d_name, pat)) {
+                       ret = -2;
+                       break;
+               }
 
                scnprintf(namebuf, sizeof(namebuf), "%s/%s",
                          path, d->d_name);
index c3feccb99ff5d1bd6ffa7e25655ea0297941e761..4cdbae6f4e61b39083182bcece354a7e381ac6a8 100644 (file)
@@ -63,6 +63,13 @@ TARGETS += zram
 TARGETS_HOTPLUG = cpu-hotplug
 TARGETS_HOTPLUG += memory-hotplug
 
+# User can optionally provide a TARGETS skiplist.
+SKIP_TARGETS ?=
+ifneq ($(SKIP_TARGETS),)
+       TMP := $(filter-out $(SKIP_TARGETS), $(TARGETS))
+       override TARGETS := $(TMP)
+endif
+
 # Clear LDFLAGS and MAKEFLAGS if called from main
 # Makefile to avoid test build failures when test
 # Makefile doesn't have explicit build rules.
@@ -171,9 +178,12 @@ run_pstore_crash:
 # 1. output_dir=kernel_src
 # 2. a separate output directory is specified using O= KBUILD_OUTPUT
 # 3. a separate output directory is specified using KBUILD_OUTPUT
+# Avoid conflict with INSTALL_PATH set by the main Makefile
 #
-INSTALL_PATH ?= $(BUILD)/install
-INSTALL_PATH := $(abspath $(INSTALL_PATH))
+KSFT_INSTALL_PATH ?= $(BUILD)/kselftest_install
+KSFT_INSTALL_PATH := $(abspath $(KSFT_INSTALL_PATH))
+# Avoid changing the rest of the logic here and lib.mk.
+INSTALL_PATH := $(KSFT_INSTALL_PATH)
 ALL_SCRIPT := $(INSTALL_PATH)/run_kselftest.sh
 
 install: all
@@ -198,11 +208,16 @@ ifdef INSTALL_PATH
        echo "  cat /dev/null > \$$logfile" >> $(ALL_SCRIPT)
        echo "fi" >> $(ALL_SCRIPT)
 
+       @# While building run_kselftest.sh skip also non-existent TARGET dirs:
+       @# they could be the result of a build failure and should NOT be
+       @# included in the generated runlist.
        for TARGET in $(TARGETS); do \
                BUILD_TARGET=$$BUILD/$$TARGET;  \
+               [ ! -d $$INSTALL_PATH/$$TARGET ] && echo "Skipping non-existent dir: $$TARGET" && continue; \
                echo "[ -w /dev/kmsg ] && echo \"kselftest: Running tests in $$TARGET\" >> /dev/kmsg" >> $(ALL_SCRIPT); \
                echo "cd $$TARGET" >> $(ALL_SCRIPT); \
                echo -n "run_many" >> $(ALL_SCRIPT); \
+               echo -n "Emit Tests for $$TARGET\n"; \
                $(MAKE) -s --no-print-directory OUTPUT=$$BUILD_TARGET -C $$TARGET emit_tests >> $(ALL_SCRIPT); \
                echo "" >> $(ALL_SCRIPT);           \
                echo "cd \$$ROOT" >> $(ALL_SCRIPT); \
index 6cbeea7b4bf16ab1d7a49aa8379ef753e35ce304..8547ecbdc61ff3193465de061ebb5fb94109129a 100644 (file)
@@ -195,7 +195,7 @@ static void run_test(int cgroup_fd)
 
        if (CHECK_FAIL(pthread_create(&tid, NULL, server_thread,
                                      (void *)&server_fd)))
-               goto close_bpf_object;
+               goto close_server_fd;
 
        pthread_mutex_lock(&server_started_mtx);
        pthread_cond_wait(&server_started, &server_started_mtx);
index a82da555b1b0217af5c1d84faaeb8b08835c006e..f4cd60d6fba2e1c8ef4142b493a663204670c620 100644 (file)
@@ -260,13 +260,14 @@ void test_tcp_rtt(void)
 
        if (CHECK_FAIL(pthread_create(&tid, NULL, server_thread,
                                      (void *)&server_fd)))
-               goto close_cgroup_fd;
+               goto close_server_fd;
 
        pthread_mutex_lock(&server_started_mtx);
        pthread_cond_wait(&server_started, &server_started_mtx);
        pthread_mutex_unlock(&server_started_mtx);
 
        CHECK_FAIL(run_test(cgroup_fd, server_fd));
+close_server_fd:
        close(server_fd);
 close_cgroup_fd:
        close(cgroup_fd);
index d23d4da66b834858a2307517b65529aba0622e86..e2d06191bd35c67cbdbd036716c1475723953ae7 100755 (executable)
@@ -63,6 +63,9 @@ fi
 
 # Setup
 tc qdisc add dev lo ingress
+echo 0 > /proc/sys/net/ipv4/conf/default/rp_filter
+echo 0 > /proc/sys/net/ipv4/conf/all/rp_filter
+echo 0 > /proc/sys/net/ipv4/conf/lo/rp_filter
 
 echo "Testing IPv4..."
 # Drops all IP/UDP packets coming from port 9
index acf7a74f97cd9bb61420205d34cea328f2380084..59ea56945e6cd6819dc12fe337f324d784449e48 100755 (executable)
@@ -314,15 +314,15 @@ test_gso()
        command -v nc >/dev/null 2>&1 || \
                { echo >&2 "nc is not available: skipping TSO tests"; return; }
 
-       # listen on IPv*_DST, capture TCP into $TMPFILE
+       # listen on port 9000, capture TCP into $TMPFILE
        if [ "${PROTO}" == "IPv4" ] ; then
                IP_DST=${IPv4_DST}
                ip netns exec ${NS3} bash -c \
-                       "nc -4 -l -s ${IPv4_DST} -p 9000 > ${TMPFILE} &"
+                       "nc -4 -l -p 9000 > ${TMPFILE} &"
        elif [ "${PROTO}" == "IPv6" ] ; then
                IP_DST=${IPv6_DST}
                ip netns exec ${NS3} bash -c \
-                       "nc -6 -l -s ${IPv6_DST} -p 9000 > ${TMPFILE} &"
+                       "nc -6 -l -p 9000 > ${TMPFILE} &"
                RET=$?
        else
                echo "    test_gso: unknown PROTO: ${PROTO}"
index 15a666329a34df435dad7021845c0cbb377fb615..1afa22c88e42a85e49a299eb1e0ff72f8fbbae3d 100755 (executable)
@@ -22,6 +22,7 @@ import os
 import pprint
 import random
 import re
+import stat
 import string
 import struct
 import subprocess
@@ -311,7 +312,11 @@ class DebugfsDir:
         for f in out.split():
             if f == "ports":
                 continue
+
             p = os.path.join(path, f)
+            if not os.stat(p).st_mode & stat.S_IRUSR:
+                continue
+
             if os.path.isfile(p):
                 _, out = cmd('cat %s/%s' % (path, f))
                 dfs[f] = out.strip()
index f38567ef694b6e34cae650cad1676e1fe7391d6e..daa7d1b8d30925ea09a1357cd5cecae709bb7a39 100755 (executable)
@@ -59,7 +59,7 @@ ip netns exec ${NS_SRC} tc filter add dev veth_src egress \
 
 # start the listener
 ip netns exec ${NS_DST} bash -c \
-       "nc -4 -l -s ${IP_DST} -p 9000 >/dev/null &"
+       "nc -4 -l -p 9000 >/dev/null &"
 declare -i NC_PID=$!
 sleep 1
 
index 00c9020bdda8b4f133807b0b6eb86f5ec90331e4..84de7bc74f2cf1f5272a8bd3b0f81ecd00a296a6 100644 (file)
@@ -3,9 +3,14 @@
 #
 # Runs a set of tests in a given subdirectory.
 export skip_rc=4
+export timeout_rc=124
 export logfile=/dev/stdout
 export per_test_logging=
 
+# Defaults for "settings" file fields:
+# "timeout" how many seconds to let each test run before failing.
+export kselftest_default_timeout=45
+
 # There isn't a shell-agnostic way to find the path of a sourced file,
 # so we must rely on BASE_DIR being set to find other tools.
 if [ -z "$BASE_DIR" ]; then
@@ -24,6 +29,16 @@ tap_prefix()
        fi
 }
 
+tap_timeout()
+{
+       # Make sure tests will time out if utility is available.
+       if [ -x /usr/bin/timeout ] ; then
+               /usr/bin/timeout "$kselftest_timeout" "$1"
+       else
+               "$1"
+       fi
+}
+
 run_one()
 {
        DIR="$1"
@@ -32,6 +47,18 @@ run_one()
 
        BASENAME_TEST=$(basename $TEST)
 
+       # Reset any "settings"-file variables.
+       export kselftest_timeout="$kselftest_default_timeout"
+       # Load per-test-directory kselftest "settings" file.
+       settings="$BASE_DIR/$DIR/settings"
+       if [ -r "$settings" ] ; then
+               while read line ; do
+                       field=$(echo "$line" | cut -d= -f1)
+                       value=$(echo "$line" | cut -d= -f2-)
+                       eval "kselftest_$field"="$value"
+               done < "$settings"
+       fi
+
        TEST_HDR_MSG="selftests: $DIR: $BASENAME_TEST"
        echo "# $TEST_HDR_MSG"
        if [ ! -x "$TEST" ]; then
@@ -44,14 +71,17 @@ run_one()
                echo "not ok $test_num $TEST_HDR_MSG"
        else
                cd `dirname $TEST` > /dev/null
-               (((((./$BASENAME_TEST 2>&1; echo $? >&3) |
+               ((((( tap_timeout ./$BASENAME_TEST 2>&1; echo $? >&3) |
                        tap_prefix >&4) 3>&1) |
                        (read xs; exit $xs)) 4>>"$logfile" &&
                echo "ok $test_num $TEST_HDR_MSG") ||
-               (if [ $? -eq $skip_rc ]; then   \
+               (rc=$?; \
+               if [ $rc -eq $skip_rc ]; then   \
                        echo "not ok $test_num $TEST_HDR_MSG # SKIP"
+               elif [ $rc -eq $timeout_rc ]; then \
+                       echo "not ok $test_num $TEST_HDR_MSG # TIMEOUT"
                else
-                       echo "not ok $test_num $TEST_HDR_MSG"
+                       echo "not ok $test_num $TEST_HDR_MSG # exit=$rc"
                fi)
                cd - >/dev/null
        fi
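The runner now wraps each test in timeout(1) when the utility is present and, before running it, loads an optional per-directory "settings" file. As parsed above, that file holds one field=value pair per line, and the only field consumed so far is timeout, defaulting to 45 seconds. A slow test directory could therefore ship a settings file containing, for example, the single line:

    timeout=300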
index ec304463883c0b36b6e50b4c3eae474411c9f453..e2e1911d62d50470f54bdf560d3af140e7e974c9 100755 (executable)
@@ -24,12 +24,12 @@ main()
                echo "$0: Installing in specified location - $install_loc ..."
        fi
 
-       install_dir=$install_loc/kselftest
+       install_dir=$install_loc/kselftest_install
 
 # Create install directory
        mkdir -p $install_dir
 # Build tests
-       INSTALL_PATH=$install_dir make install
+       KSFT_INSTALL_PATH=$install_dir make install
 }
 
 main "$@"
index b35da375530afb1a3d2166aeac07c49f4cff11a7..409c1fa75e03575c2c04736b325b0484ae8fd65a 100644 (file)
@@ -1,4 +1,5 @@
 /s390x/sync_regs_test
+/s390x/memop
 /x86_64/cr4_cpuid_sync_test
 /x86_64/evmcs_test
 /x86_64/hyperv_cpuid
@@ -9,6 +10,7 @@
 /x86_64/state_test
 /x86_64/sync_regs_test
 /x86_64/vmx_close_while_nested_test
+/x86_64/vmx_dirty_log_test
 /x86_64/vmx_set_nested_state_test
 /x86_64/vmx_tsc_adjust_test
 /clear_dirty_log_test
index 62c591f87dabbe2b3810fad32f6ba86cc8292722..c5ec868fa1e523fc36419ef008722afb469349c8 100644 (file)
@@ -22,6 +22,7 @@ TEST_GEN_PROGS_x86_64 += x86_64/smm_test
 TEST_GEN_PROGS_x86_64 += x86_64/state_test
 TEST_GEN_PROGS_x86_64 += x86_64/sync_regs_test
 TEST_GEN_PROGS_x86_64 += x86_64/vmx_close_while_nested_test
+TEST_GEN_PROGS_x86_64 += x86_64/vmx_dirty_log_test
 TEST_GEN_PROGS_x86_64 += x86_64/vmx_set_nested_state_test
 TEST_GEN_PROGS_x86_64 += x86_64/vmx_tsc_adjust_test
 TEST_GEN_PROGS_x86_64 += clear_dirty_log_test
@@ -48,7 +49,7 @@ CFLAGS += -Wall -Wstrict-prototypes -Wuninitialized -O2 -g -std=gnu99 \
        -I$(LINUX_HDR_PATH) -Iinclude -I$(<D) -Iinclude/$(UNAME_M) -I..
 
 no-pie-option := $(call try-run, echo 'int main() { return 0; }' | \
-        $(CC) -Werror $(KBUILD_CPPFLAGS) $(CC_OPTION_CFLAGS) -no-pie -x c - -o "$$TMP", -no-pie)
+        $(CC) -Werror -no-pie -x c - -o "$$TMP", -no-pie)
 
 # On s390, build the testcases KVM-enabled
 pgste-option = $(call try-run, echo 'int main() { return 0; }' | \
index 0c17f2ee685e50658f1ab5121f6156ebc8e4a27c..ff234018219cfcf883e020d86f4c4306c2591e5a 100644 (file)
@@ -1083,6 +1083,9 @@ void kvm_get_cpu_address_width(unsigned int *pa_bits, unsigned int *va_bits);
 #define VMX_BASIC_MEM_TYPE_WB  6LLU
 #define VMX_BASIC_INOUT                0x0040000000000000LLU
 
+/* VMX_EPT_VPID_CAP bits */
+#define VMX_EPT_VPID_CAP_AD_BITS       (1ULL << 21)
+
 /* MSR_IA32_VMX_MISC bits */
 #define MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS (1ULL << 29)
 #define MSR_IA32_VMX_MISC_PREEMPTION_TIMER_SCALE   0x1F
index 69b17055f63da7c7f10569c9b03361253860c670..f52e0ba84fedb0fe364998bda8da1e9d8f21ee1f 100644 (file)
@@ -569,6 +569,10 @@ struct vmx_pages {
        void *enlightened_vmcs_hva;
        uint64_t enlightened_vmcs_gpa;
        void *enlightened_vmcs;
+
+       void *eptp_hva;
+       uint64_t eptp_gpa;
+       void *eptp;
 };
 
 struct vmx_pages *vcpu_alloc_vmx(struct kvm_vm *vm, vm_vaddr_t *p_vmx_gva);
@@ -576,4 +580,16 @@ bool prepare_for_vmx_operation(struct vmx_pages *vmx);
 void prepare_vmcs(struct vmx_pages *vmx, void *guest_rip, void *guest_rsp);
 bool load_vmcs(struct vmx_pages *vmx);
 
+void nested_vmx_check_supported(void);
+
+void nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm,
+                  uint64_t nested_paddr, uint64_t paddr, uint32_t eptp_memslot);
+void nested_map(struct vmx_pages *vmx, struct kvm_vm *vm,
+                uint64_t nested_paddr, uint64_t paddr, uint64_t size,
+                uint32_t eptp_memslot);
+void nested_map_memslot(struct vmx_pages *vmx, struct kvm_vm *vm,
+                       uint32_t memslot, uint32_t eptp_memslot);
+void prepare_eptp(struct vmx_pages *vmx, struct kvm_vm *vm,
+                 uint32_t eptp_memslot);
+
 #endif /* SELFTEST_KVM_VMX_H */
index 80a338b5403c3533325edb6dd93e263a38e8f644..41cf45416060fc6e9ce04fcb2d92d7511e62abb0 100644 (file)
@@ -705,7 +705,7 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm,
  *   on error (e.g. currently no memory region using memslot as a KVM
  *   memory slot ID).
  */
-static struct userspace_mem_region *
+struct userspace_mem_region *
 memslot2region(struct kvm_vm *vm, uint32_t memslot)
 {
        struct userspace_mem_region *region;
index f36262e0f655335677c445f37486c69f3b8c19b8..ac50c42750cff37a635ac5bbb410fbad79cfcbb8 100644 (file)
@@ -68,4 +68,7 @@ void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent);
 void regs_dump(FILE *stream, struct kvm_regs *regs, uint8_t indent);
 void sregs_dump(FILE *stream, struct kvm_sregs *sregs, uint8_t indent);
 
+struct userspace_mem_region *
+memslot2region(struct kvm_vm *vm, uint32_t memslot);
+
 #endif /* SELFTEST_KVM_UTIL_INTERNAL_H */
index c53dbc6bc5689f13b511f000cbb4c6f53c36c044..6698cb741e10a47afad23e92d22490ed46ade598 100644 (file)
@@ -1085,7 +1085,7 @@ struct kvm_x86_state *vcpu_save_state(struct kvm_vm *vm, uint32_t vcpuid)
        for (i = 0; i < nmsrs; i++)
                state->msrs.entries[i].index = list->indices[i];
        r = ioctl(vcpu->fd, KVM_GET_MSRS, &state->msrs);
-        TEST_ASSERT(r == nmsrs, "Unexpected result from KVM_GET_MSRS, r: %i (failed at %x)",
+        TEST_ASSERT(r == nmsrs, "Unexpected result from KVM_GET_MSRS, r: %i (failed MSR was 0x%x)",
                 r, r == nmsrs ? -1 : list->indices[r]);
 
        r = ioctl(vcpu->fd, KVM_GET_DEBUGREGS, &state->debugregs);
index 9cef0455b819c39688bc57551d99f8fb76abf4be..f6ec97b7eaef67b6cbce47f1d6a7fa0fb3ef1e0e 100644 (file)
@@ -7,11 +7,39 @@
 
 #include "test_util.h"
 #include "kvm_util.h"
+#include "../kvm_util_internal.h"
 #include "processor.h"
 #include "vmx.h"
 
+#define PAGE_SHIFT_4K  12
+
+#define KVM_EPT_PAGE_TABLE_MIN_PADDR 0x1c0000
+
 bool enable_evmcs;
 
+struct eptPageTableEntry {
+       uint64_t readable:1;
+       uint64_t writable:1;
+       uint64_t executable:1;
+       uint64_t memory_type:3;
+       uint64_t ignore_pat:1;
+       uint64_t page_size:1;
+       uint64_t accessed:1;
+       uint64_t dirty:1;
+       uint64_t ignored_11_10:2;
+       uint64_t address:40;
+       uint64_t ignored_62_52:11;
+       uint64_t suppress_ve:1;
+};
+
+struct eptPageTablePointer {
+       uint64_t memory_type:3;
+       uint64_t page_walk_length:3;
+       uint64_t ad_enabled:1;
+       uint64_t reserved_11_07:5;
+       uint64_t address:40;
+       uint64_t reserved_63_52:12;
+};
 int vcpu_enable_evmcs(struct kvm_vm *vm, int vcpu_id)
 {
        uint16_t evmcs_ver;
@@ -174,15 +202,35 @@ bool load_vmcs(struct vmx_pages *vmx)
  */
 static inline void init_vmcs_control_fields(struct vmx_pages *vmx)
 {
+       uint32_t sec_exec_ctl = 0;
+
        vmwrite(VIRTUAL_PROCESSOR_ID, 0);
        vmwrite(POSTED_INTR_NV, 0);
 
        vmwrite(PIN_BASED_VM_EXEC_CONTROL, rdmsr(MSR_IA32_VMX_TRUE_PINBASED_CTLS));
-       if (!vmwrite(SECONDARY_VM_EXEC_CONTROL, 0))
+
+       if (vmx->eptp_gpa) {
+               uint64_t ept_paddr;
+               struct eptPageTablePointer eptp = {
+                       .memory_type = VMX_BASIC_MEM_TYPE_WB,
+                       .page_walk_length = 3, /* + 1 */
+                       .ad_enabled = !!(rdmsr(MSR_IA32_VMX_EPT_VPID_CAP) & VMX_EPT_VPID_CAP_AD_BITS),
+                       .address = vmx->eptp_gpa >> PAGE_SHIFT_4K,
+               };
+
+               memcpy(&ept_paddr, &eptp, sizeof(ept_paddr));
+               vmwrite(EPT_POINTER, ept_paddr);
+               sec_exec_ctl |= SECONDARY_EXEC_ENABLE_EPT;
+       }
+
+       if (!vmwrite(SECONDARY_VM_EXEC_CONTROL, sec_exec_ctl))
                vmwrite(CPU_BASED_VM_EXEC_CONTROL,
                        rdmsr(MSR_IA32_VMX_TRUE_PROCBASED_CTLS) | CPU_BASED_ACTIVATE_SECONDARY_CONTROLS);
-       else
+       else {
                vmwrite(CPU_BASED_VM_EXEC_CONTROL, rdmsr(MSR_IA32_VMX_TRUE_PROCBASED_CTLS));
+               GUEST_ASSERT(!sec_exec_ctl);
+       }
+
        vmwrite(EXCEPTION_BITMAP, 0);
        vmwrite(PAGE_FAULT_ERROR_CODE_MASK, 0);
        vmwrite(PAGE_FAULT_ERROR_CODE_MATCH, -1); /* Never match */
@@ -327,3 +375,162 @@ void prepare_vmcs(struct vmx_pages *vmx, void *guest_rip, void *guest_rsp)
        init_vmcs_host_state();
        init_vmcs_guest_state(guest_rip, guest_rsp);
 }
+
+void nested_vmx_check_supported(void)
+{
+       struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1);
+
+       if (!(entry->ecx & CPUID_VMX)) {
+               fprintf(stderr, "nested VMX not enabled, skipping test\n");
+               exit(KSFT_SKIP);
+       }
+}
+
+void nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm,
+                  uint64_t nested_paddr, uint64_t paddr, uint32_t eptp_memslot)
+{
+       uint16_t index[4];
+       struct eptPageTableEntry *pml4e;
+
+       TEST_ASSERT(vm->mode == VM_MODE_PXXV48_4K, "Attempt to use "
+                   "unknown or unsupported guest mode, mode: 0x%x", vm->mode);
+
+       TEST_ASSERT((nested_paddr % vm->page_size) == 0,
+                   "Nested physical address not on page boundary,\n"
+                   "  nested_paddr: 0x%lx vm->page_size: 0x%x",
+                   nested_paddr, vm->page_size);
+       TEST_ASSERT((nested_paddr >> vm->page_shift) <= vm->max_gfn,
+                   "Physical address beyond maximum supported,\n"
+                   "  nested_paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
+                   nested_paddr, vm->max_gfn, vm->page_size);
+       TEST_ASSERT((paddr % vm->page_size) == 0,
+                   "Physical address not on page boundary,\n"
+                   "  paddr: 0x%lx vm->page_size: 0x%x",
+                   paddr, vm->page_size);
+       TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn,
+                   "Physical address beyond maximum supported,\n"
+                   "  paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
+                   paddr, vm->max_gfn, vm->page_size);
+
+       index[0] = (nested_paddr >> 12) & 0x1ffu;
+       index[1] = (nested_paddr >> 21) & 0x1ffu;
+       index[2] = (nested_paddr >> 30) & 0x1ffu;
+       index[3] = (nested_paddr >> 39) & 0x1ffu;
+
+       /* Allocate page directory pointer table if not present. */
+       pml4e = vmx->eptp_hva;
+       if (!pml4e[index[3]].readable) {
+               pml4e[index[3]].address = vm_phy_page_alloc(vm,
+                         KVM_EPT_PAGE_TABLE_MIN_PADDR, eptp_memslot)
+                       >> vm->page_shift;
+               pml4e[index[3]].writable = true;
+               pml4e[index[3]].readable = true;
+               pml4e[index[3]].executable = true;
+       }
+
+       /* Allocate page directory table if not present. */
+       struct eptPageTableEntry *pdpe;
+       pdpe = addr_gpa2hva(vm, pml4e[index[3]].address * vm->page_size);
+       if (!pdpe[index[2]].readable) {
+               pdpe[index[2]].address = vm_phy_page_alloc(vm,
+                         KVM_EPT_PAGE_TABLE_MIN_PADDR, eptp_memslot)
+                       >> vm->page_shift;
+               pdpe[index[2]].writable = true;
+               pdpe[index[2]].readable = true;
+               pdpe[index[2]].executable = true;
+       }
+
+       /* Allocate page table if not present. */
+       struct eptPageTableEntry *pde;
+       pde = addr_gpa2hva(vm, pdpe[index[2]].address * vm->page_size);
+       if (!pde[index[1]].readable) {
+               pde[index[1]].address = vm_phy_page_alloc(vm,
+                         KVM_EPT_PAGE_TABLE_MIN_PADDR, eptp_memslot)
+                       >> vm->page_shift;
+               pde[index[1]].writable = true;
+               pde[index[1]].readable = true;
+               pde[index[1]].executable = true;
+       }
+
+       /* Fill in page table entry. */
+       struct eptPageTableEntry *pte;
+       pte = addr_gpa2hva(vm, pde[index[1]].address * vm->page_size);
+       pte[index[0]].address = paddr >> vm->page_shift;
+       pte[index[0]].writable = true;
+       pte[index[0]].readable = true;
+       pte[index[0]].executable = true;
+
+       /*
+        * For now mark these as accessed and dirty because the only
+        * testcase we have needs that.  Can be reconsidered later.
+        */
+       pte[index[0]].accessed = true;
+       pte[index[0]].dirty = true;
+}
+
+/*
+ * Map a range of EPT guest physical addresses to the VM's physical address
+ *
+ * Input Args:
+ *   vm - Virtual Machine
+ *   nested_paddr - Nested guest physical address to map
+ *   paddr - VM Physical Address
+ *   size - The size of the range to map
+ *   eptp_memslot - Memory region slot for new virtual translation tables
+ *
+ * Output Args: None
+ *
+ * Return: None
+ *
+ * Within the VM given by vm, creates a nested guest translation for the
+ * page range starting at nested_paddr to the page range starting at paddr.
+ */
+void nested_map(struct vmx_pages *vmx, struct kvm_vm *vm,
+               uint64_t nested_paddr, uint64_t paddr, uint64_t size,
+               uint32_t eptp_memslot)
+{
+       size_t page_size = vm->page_size;
+       size_t npages = size / page_size;
+
+       TEST_ASSERT(nested_paddr + size > nested_paddr, "Vaddr overflow");
+       TEST_ASSERT(paddr + size > paddr, "Paddr overflow");
+
+       while (npages--) {
+               nested_pg_map(vmx, vm, nested_paddr, paddr, eptp_memslot);
+               nested_paddr += page_size;
+               paddr += page_size;
+       }
+}
+
+/* Prepare an identity extended page table that maps all the
+ * physical pages in VM.
+ */
+void nested_map_memslot(struct vmx_pages *vmx, struct kvm_vm *vm,
+                       uint32_t memslot, uint32_t eptp_memslot)
+{
+       sparsebit_idx_t i, last;
+       struct userspace_mem_region *region =
+               memslot2region(vm, memslot);
+
+       i = (region->region.guest_phys_addr >> vm->page_shift) - 1;
+       last = i + (region->region.memory_size >> vm->page_shift);
+       for (;;) {
+               i = sparsebit_next_clear(region->unused_phy_pages, i);
+               if (i > last)
+                       break;
+
+               nested_map(vmx, vm,
+                          (uint64_t)i << vm->page_shift,
+                          (uint64_t)i << vm->page_shift,
+                          1 << vm->page_shift,
+                          eptp_memslot);
+       }
+}
+
+void prepare_eptp(struct vmx_pages *vmx, struct kvm_vm *vm,
+                 uint32_t eptp_memslot)
+{
+       vmx->eptp = (void *)vm_vaddr_alloc(vm, getpagesize(), 0x10000, 0, 0);
+       vmx->eptp_hva = addr_gva2hva(vm, (uintptr_t)vmx->eptp);
+       vmx->eptp_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->eptp);
+}
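
Taken together, these helpers are meant to be called in order: prepare_eptp() allocates the EPT root, nested_map_memslot() builds an identity map for one memslot, and nested_map() adds individual ranges. A minimal usage sketch, mirroring the dirty-log test added later in this series (constants are illustrative only):

	/* Identity-map memslot 0 in the EPT, then remap one L2 GPA page
	 * onto an L1 test page.
	 */
	prepare_eptp(vmx, vm, 0);
	nested_map_memslot(vmx, vm, 0, 0);
	nested_map(vmx, vm, 0xc0001000, 0xc0000000, 4096, 0);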
index 11c2a70a7b87a5df60772eefee2284be3376eaf1..5c82242562943fe36e14ff3bb24a4745a16ce76b 100644 (file)
 
 #define VCPU_ID 5
 
+#define UCALL_PIO_PORT ((uint16_t)0x1000)
+
+/*
+ * ucall is embedded here to protect against the compiler reshuffling registers
+ * before calling a function. In this test we only need to trigger a KVM_EXIT_IO
+ * vmexit and preserve RBX; no additional information is needed.
+ */
 void guest_code(void)
 {
-       /*
-        * use a callee-save register, otherwise the compiler
-        * saves it around the call to GUEST_SYNC.
-        */
-       register u32 stage asm("rbx");
-       for (;;) {
-               GUEST_SYNC(0);
-               stage++;
-               asm volatile ("" : : "r" (stage));
-       }
+       asm volatile("1: in %[port], %%al\n"
+                    "add $0x1, %%rbx\n"
+                    "jmp 1b"
+                    : : [port] "d" (UCALL_PIO_PORT) : "rax", "rbx");
 }
 
 static void compare_regs(struct kvm_regs *left, struct kvm_regs *right)
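
The in instruction above exits to userspace with KVM_EXIT_IO on every iteration while leaving RBX intact. A rough sketch of how the host side of such a test can observe this, using helpers that already exist in these selftests (the exact loop in this test is not shown in the hunk):

	struct kvm_run *run = vcpu_state(vm, VCPU_ID);

	_vcpu_run(vm, VCPU_ID);
	TEST_ASSERT(run->exit_reason == KVM_EXIT_IO &&
		    run->io.port == UCALL_PIO_PORT,
		    "Expected PIO exit on the ucall port, got exit reason %u",
		    run->exit_reason);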
index 3b0ffe01dacd06e986bb1a66999af64ef5a75dae..5dfb53546a266d3681e32d7e2fd1fcb42fb3166f 100644 (file)
@@ -53,12 +53,8 @@ static void l1_guest_code(struct vmx_pages *vmx_pages)
 int main(int argc, char *argv[])
 {
        vm_vaddr_t vmx_pages_gva;
-       struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1);
 
-       if (!(entry->ecx & CPUID_VMX)) {
-               fprintf(stderr, "nested VMX not enabled, skipping test\n");
-               exit(KSFT_SKIP);
-       }
+       nested_vmx_check_supported();
 
        vm = vm_create_default(VCPU_ID, 0, (void *) l1_guest_code);
        vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
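
nested_vmx_check_supported() centralizes the CPUID check that several tests used to open-code. Based on the code it replaces, the helper presumably looks roughly like this (it lives in the shared vmx library, outside these hunks):

	void nested_vmx_check_supported(void)
	{
		struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1);

		if (!(entry->ecx & CPUID_VMX)) {
			fprintf(stderr, "nested VMX not enabled, skipping test\n");
			exit(KSFT_SKIP);
		}
	}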
diff --git a/tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c b/tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c
new file mode 100644 (file)
index 0000000..a223a64
--- /dev/null
@@ -0,0 +1,158 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KVM dirty page logging test
+ *
+ * Copyright (C) 2018, Red Hat, Inc.
+ */
+
+#define _GNU_SOURCE /* for program_invocation_name */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <linux/bitmap.h>
+#include <linux/bitops.h>
+
+#include "test_util.h"
+#include "kvm_util.h"
+#include "processor.h"
+#include "vmx.h"
+
+#define VCPU_ID                                1
+
+/* The memory slot index to track dirty pages */
+#define TEST_MEM_SLOT_INDEX            1
+#define TEST_MEM_SIZE                  3 /* in 4K pages */
+
+/* L1 guest test virtual memory offset */
+#define GUEST_TEST_MEM                 0xc0000000
+
+/* L2 guest test virtual memory offset */
+#define NESTED_TEST_MEM1               0xc0001000
+#define NESTED_TEST_MEM2               0xc0002000
+
+static void l2_guest_code(void)
+{
+       *(volatile uint64_t *)NESTED_TEST_MEM1;
+       *(volatile uint64_t *)NESTED_TEST_MEM1 = 1;
+       GUEST_SYNC(true);
+       GUEST_SYNC(false);
+
+       *(volatile uint64_t *)NESTED_TEST_MEM2 = 1;
+       GUEST_SYNC(true);
+       *(volatile uint64_t *)NESTED_TEST_MEM2 = 1;
+       GUEST_SYNC(true);
+       GUEST_SYNC(false);
+
+       /* Exit to L1 and never come back.  */
+       vmcall();
+}
+
+void l1_guest_code(struct vmx_pages *vmx)
+{
+#define L2_GUEST_STACK_SIZE 64
+       unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
+
+       GUEST_ASSERT(vmx->vmcs_gpa);
+       GUEST_ASSERT(prepare_for_vmx_operation(vmx));
+       GUEST_ASSERT(load_vmcs(vmx));
+
+       prepare_vmcs(vmx, l2_guest_code,
+                    &l2_guest_stack[L2_GUEST_STACK_SIZE]);
+
+       GUEST_SYNC(false);
+       GUEST_ASSERT(!vmlaunch());
+       GUEST_SYNC(false);
+       GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
+       GUEST_DONE();
+}
+
+int main(int argc, char *argv[])
+{
+       vm_vaddr_t vmx_pages_gva = 0;
+       struct vmx_pages *vmx;
+       unsigned long *bmap;
+       uint64_t *host_test_mem;
+
+       struct kvm_vm *vm;
+       struct kvm_run *run;
+       struct ucall uc;
+       bool done = false;
+
+       nested_vmx_check_supported();
+
+       /* Create VM */
+       vm = vm_create_default(VCPU_ID, 0, l1_guest_code);
+       vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
+       vmx = vcpu_alloc_vmx(vm, &vmx_pages_gva);
+       vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva);
+       run = vcpu_state(vm, VCPU_ID);
+
+       /* Add an extra memory slot for testing dirty logging */
+       vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
+                                   GUEST_TEST_MEM,
+                                   TEST_MEM_SLOT_INDEX,
+                                   TEST_MEM_SIZE,
+                                   KVM_MEM_LOG_DIRTY_PAGES);
+
+       /*
+        * Add an identity map for GVA range [0xc0000000, 0xc0003000).  This
+        * affects both L1 and L2.  However...
+        */
+       virt_map(vm, GUEST_TEST_MEM, GUEST_TEST_MEM,
+                TEST_MEM_SIZE * 4096, 0);
+
+       /*
+        * ... pages in the L2 GPA range [0xc0001000, 0xc0003000) will map to
+        * 0xc0000000.
+        *
+        * Note that prepare_eptp should be called only after L1's GPA map is done,
+        * meaning after the last call to virt_map.
+        */
+       prepare_eptp(vmx, vm, 0);
+       nested_map_memslot(vmx, vm, 0, 0);
+       nested_map(vmx, vm, NESTED_TEST_MEM1, GUEST_TEST_MEM, 4096, 0);
+       nested_map(vmx, vm, NESTED_TEST_MEM2, GUEST_TEST_MEM, 4096, 0);
+
+       bmap = bitmap_alloc(TEST_MEM_SIZE);
+       host_test_mem = addr_gpa2hva(vm, GUEST_TEST_MEM);
+
+       while (!done) {
+               memset(host_test_mem, 0xaa, TEST_MEM_SIZE * 4096);
+               _vcpu_run(vm, VCPU_ID);
+               TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
+                           "Unexpected exit reason: %u (%s),\n",
+                           run->exit_reason,
+                           exit_reason_str(run->exit_reason));
+
+               switch (get_ucall(vm, VCPU_ID, &uc)) {
+               case UCALL_ABORT:
+                       TEST_ASSERT(false, "%s at %s:%d", (const char *)uc.args[0],
+                                   __FILE__, uc.args[1]);
+                       /* NOT REACHED */
+               case UCALL_SYNC:
+                       /*
+                        * The nested guest wrote at offset 0x1000 in the memslot, but the
+                        * dirty bitmap must be filled in according to L1 GPA, not L2.
+                        */
+                       kvm_vm_get_dirty_log(vm, TEST_MEM_SLOT_INDEX, bmap);
+                       if (uc.args[1]) {
+                               TEST_ASSERT(test_bit(0, bmap), "Page 0 incorrectly reported clean\n");
+                               TEST_ASSERT(host_test_mem[0] == 1, "Page 0 not written by guest\n");
+                       } else {
+                               TEST_ASSERT(!test_bit(0, bmap), "Page 0 incorrectly reported dirty\n");
+                               TEST_ASSERT(host_test_mem[0] == 0xaaaaaaaaaaaaaaaaULL, "Page 0 written by guest\n");
+                       }
+
+                       TEST_ASSERT(!test_bit(1, bmap), "Page 1 incorrectly reported dirty\n");
+                       TEST_ASSERT(host_test_mem[4096 / 8] == 0xaaaaaaaaaaaaaaaaULL, "Page 1 written by guest\n");
+                       TEST_ASSERT(!test_bit(2, bmap), "Page 2 incorrectly reported dirty\n");
+                       TEST_ASSERT(host_test_mem[8192 / 8] == 0xaaaaaaaaaaaaaaaaULL, "Page 2 written by guest\n");
+                       break;
+               case UCALL_DONE:
+                       done = true;
+                       break;
+               default:
+                       TEST_ASSERT(false, "Unknown ucall 0x%x.", uc.cmd);
+               }
+       }
+}
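
The per-page assertions index host_test_mem in 64-bit words, so page n starts at index n * 4096 / 8. A hypothetical helper (not part of the patch) that expresses the repeated clean-page check:

	static void check_page_clean(unsigned long *bmap, uint64_t *host_test_mem, int n)
	{
		TEST_ASSERT(!test_bit(n, bmap),
			    "Page %d incorrectly reported dirty\n", n);
		TEST_ASSERT(host_test_mem[n * 4096 / 8] == 0xaaaaaaaaaaaaaaaaULL,
			    "Page %d written by guest\n", n);
	}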
index 853e370e8a3942c559925f37d0387e68a81aeef0..9ef7fab39d4878b937a435dce9fa7859dc687f39 100644 (file)
@@ -224,7 +224,6 @@ int main(int argc, char *argv[])
 {
        struct kvm_vm *vm;
        struct kvm_nested_state state;
-       struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1);
 
        have_evmcs = kvm_check_cap(KVM_CAP_HYPERV_ENLIGHTENED_VMCS);
 
@@ -237,10 +236,7 @@ int main(int argc, char *argv[])
         * AMD currently does not implement set_nested_state, so for now we
         * just early out.
         */
-       if (!(entry->ecx & CPUID_VMX)) {
-               fprintf(stderr, "nested VMX not enabled, skipping test\n");
-               exit(KSFT_SKIP);
-       }
+       nested_vmx_check_supported();
 
        vm = vm_create_default(VCPU_ID, 0, 0);
 
@@ -271,12 +267,7 @@ int main(int argc, char *argv[])
        state.flags = KVM_STATE_NESTED_RUN_PENDING;
        test_nested_state_expect_einval(vm, &state);
 
-       /*
-        * TODO: When SVM support is added for KVM_SET_NESTED_STATE
-        *       add tests here to support it like VMX.
-        */
-       if (entry->ecx & CPUID_VMX)
-               test_vmx_nested_state(vm);
+       test_vmx_nested_state(vm);
 
        kvm_vm_free(vm);
        return 0;
index f36c10eba71edc6b4b1786d4976a4a11e774c20b..5590fd2bcf87d450e22946ab378620d043094533 100644 (file)
@@ -128,12 +128,8 @@ static void report(int64_t val)
 int main(int argc, char *argv[])
 {
        vm_vaddr_t vmx_pages_gva;
-       struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1);
 
-       if (!(entry->ecx & CPUID_VMX)) {
-               fprintf(stderr, "nested VMX not enabled, skipping test\n");
-               exit(KSFT_SKIP);
-       }
+       nested_vmx_check_supported();
 
        vm = vm_create_default(VCPU_ID, 0, (void *) l1_guest_code);
        vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
index c7cced739c34bcada309cae1ce7607af04637887..8aefd81fbc86224863dd7aaadaba5c195ba46811 100644 (file)
@@ -21,3 +21,4 @@ ipv6_flowlabel
 ipv6_flowlabel_mgr
 so_txtime
 tcp_fastopen_backup_key
+nettest
index c4ba0ff4a53faefa225ef847fcb544f6cea6510d..76c1897e6352fefd9f5962549a7ac937c249b883 100755 (executable)
@@ -1438,6 +1438,27 @@ ipv4_addr_metric_test()
        fi
        log_test $rc 0 "Prefix route with metric on link up"
 
+       # explicitly check for metric changes on edge scenarios
+       run_cmd "$IP addr flush dev dummy2"
+       run_cmd "$IP addr add dev dummy2 172.16.104.0/24 metric 259"
+       run_cmd "$IP addr change dev dummy2 172.16.104.0/24 metric 260"
+       rc=$?
+       if [ $rc -eq 0 ]; then
+               check_route "172.16.104.0/24 dev dummy2 proto kernel scope link src 172.16.104.0 metric 260"
+               rc=$?
+       fi
+       log_test $rc 0 "Modify metric of .0/24 address"
+
+       run_cmd "$IP addr flush dev dummy2"
+       run_cmd "$IP addr add dev dummy2 172.16.104.1/32 peer 172.16.104.2 metric 260"
+       run_cmd "$IP addr change dev dummy2 172.16.104.1/32 peer 172.16.104.2 metric 261"
+       rc=$?
+       if [ $rc -eq 0 ]; then
+               check_route "172.16.104.2 dev dummy2 proto kernel scope link src 172.16.104.1 metric 261"
+               rc=$?
+       fi
+       log_test $rc 0 "Modify metric of address with peer route"
+
        $IP li del dummy1
        $IP li del dummy2
        cleanup
old mode 100644 (file)
new mode 100755 (executable)
index fe3230c5598639f30586142fac2f1eda851c7c3d..fb7a59ed759ebbef223bfe92e85acd33c0b4c597 100644 (file)
@@ -129,7 +129,7 @@ static void test(int *rcv_fds, int count, int proto)
 {
        struct epoll_event ev;
        int epfd, i, test_fd;
-       uint16_t test_family;
+       int test_family;
        socklen_t len;
 
        epfd = epoll_create(1);
@@ -146,6 +146,7 @@ static void test(int *rcv_fds, int count, int proto)
        send_from_v4(proto);
 
        test_fd = receive_once(epfd, proto);
+       len = sizeof(test_family);
        if (getsockopt(test_fd, SOL_SOCKET, SO_DOMAIN, &test_family, &len))
                error(1, errno, "failed to read socket domain");
        if (test_family != AF_INET)
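
Two things matter in this fix: SO_DOMAIN returns an int, and getsockopt() treats len as a value-result argument that must be set to the buffer size before the call. A minimal correct-usage sketch, independent of this particular test:

	int domain;
	socklen_t len = sizeof(domain);		/* must be initialized on input */

	if (getsockopt(fd, SOL_SOCKET, SO_DOMAIN, &domain, &len))
		error(1, errno, "failed to read socket domain");
	if (domain != AF_INET)
		error(1, 0, "unexpected socket domain %d", domain);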
index b8265ee9923f2b11af7884d365b2e5af9323ff3a..614b31aad168b35564e91d9cfaa364e06e73762c 100644 (file)
@@ -89,12 +89,9 @@ struct testcase testcases_v4[] = {
                .tfail = true,
        },
        {
-               /* send a single MSS: will fail with GSO, because the segment
-                * logic in udp4_ufo_fragment demands a gso skb to be > MTU
-                */
+               /* send a single MSS: will fall back to no GSO */
                .tlen = CONST_MSS_V4,
                .gso_len = CONST_MSS_V4,
-               .tfail = true,
                .r_num_mss = 1,
        },
        {
@@ -139,10 +136,9 @@ struct testcase testcases_v4[] = {
                .tfail = true,
        },
        {
-               /* send a single 1B MSS: will fail, see single MSS above */
+               /* send a single 1B MSS: will fall back to no GSO */
                .tlen = 1,
                .gso_len = 1,
-               .tfail = true,
                .r_num_mss = 1,
        },
        {
@@ -196,12 +192,9 @@ struct testcase testcases_v6[] = {
                .tfail = true,
        },
        {
-               /* send a single MSS: will fail with GSO, because the segment
-                * logic in udp4_ufo_fragment demands a gso skb to be > MTU
-                */
+               /* send a single MSS: will fall back to no GSO */
                .tlen = CONST_MSS_V6,
                .gso_len = CONST_MSS_V6,
-               .tfail = true,
                .r_num_mss = 1,
        },
        {
@@ -246,10 +239,9 @@ struct testcase testcases_v6[] = {
                .tfail = true,
        },
        {
-               /* send a single 1B MSS: will fail, see single MSS above */
+               /* send a single 1B MSS: will fall back to no GSO */
                .tlen = 1,
                .gso_len = 1,
-               .tfail = true,
                .r_num_mss = 1,
        },
        {
index 464c9b76148f1538643fd512e35461f327ea1249..7550f08822a3345290e2c780517c0b7cc373d43d 100644 (file)
@@ -1,5 +1,5 @@
 # SPDX-License-Identifier: GPL-2.0-only
-CFLAGS += -g -I../../../../usr/include/ -lpthread
+CFLAGS += -g -I../../../../usr/include/ -pthread
 
 TEST_GEN_PROGS := pidfd_test pidfd_open_test pidfd_poll_test pidfd_wait
 
index 9868a5ddd84799410698f3bdb78f16a4d476eb4f..f85a0938ab25fefd2fa7e1214fd600a571b85456 100644 (file)
@@ -636,7 +636,7 @@ int main(int argc, char *argv[])
                        nrthreads = strtoul(optarg, NULL, 10);
                        break;
                case 'l':
-                       strncpy(logdir, optarg, LOGDIR_NAME_SIZE);
+                       strncpy(logdir, optarg, LOGDIR_NAME_SIZE - 1);
                        break;
                case 't':
                        run_time = strtoul(optarg, NULL, 10);
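
strncpy() does not NUL-terminate the destination when the source is at least as long as the count, so copying LOGDIR_NAME_SIZE bytes into a LOGDIR_NAME_SIZE buffer can leave it unterminated. Reserving the last byte, as the fix does, avoids that; a standalone sketch of the safe pattern (the explicit terminator is shown for clarity):

	char logdir[LOGDIR_NAME_SIZE];

	strncpy(logdir, optarg, LOGDIR_NAME_SIZE - 1);
	logdir[LOGDIR_NAME_SIZE - 1] = '\0';	/* guarantee termination */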
diff --git a/tools/testing/selftests/rtc/settings b/tools/testing/selftests/rtc/settings
new file mode 100644 (file)
index 0000000..ba4d85f
--- /dev/null
@@ -0,0 +1 @@
+timeout=90
index c0534e298b5128d9bc2800abb5b59d7d5379f75e..485cf06ef013f0183d695b35099f4409d9c5f6f4 100644 (file)
@@ -37,7 +37,7 @@ int main(int argc, char **argv)
        char *file = "/dev/zero";
        char *p;
 
-       while ((opt = getopt(argc, argv, "m:r:n:f:tTLUSH")) != -1) {
+       while ((opt = getopt(argc, argv, "m:r:n:f:tTLUwSH")) != -1) {
                switch (opt) {
                case 'm':
                        size = atoi(optarg) * MB;
@@ -71,7 +71,7 @@ int main(int argc, char **argv)
                        flags |= MAP_SHARED;
                        break;
                case 'H':
-                       flags |= MAP_HUGETLB;
+                       flags |= (MAP_HUGETLB | MAP_ANONYMOUS);
                        break;
                default:
                        return -1;
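
MAP_HUGETLB only applies to anonymous mappings (or hugetlbfs-backed files); combining it with a regular file such as /dev/zero fails, hence adding MAP_ANONYMOUS for the -H case. A minimal sketch of an anonymous hugepage mapping, assuming size is a multiple of the huge page size:

	void *p = mmap(NULL, size, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
	if (p == MAP_FAILED)
		perror("mmap(MAP_HUGETLB)");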
index afff120c7be6e57496de9d835b38acf4b89930a3..f45e510500c0d7846c354513804404811d093417 100644 (file)
@@ -19,7 +19,7 @@
 
 int fd;
 const char v = 'V';
-static const char sopts[] = "bdehp:t:Tn:NLf:";
+static const char sopts[] = "bdehp:t:Tn:NLf:i";
 static const struct option lopts[] = {
        {"bootstatus",          no_argument, NULL, 'b'},
        {"disable",             no_argument, NULL, 'd'},
@@ -32,6 +32,7 @@ static const struct option lopts[] = {
        {"getpretimeout",       no_argument, NULL, 'N'},
        {"gettimeleft",         no_argument, NULL, 'L'},
        {"file",          required_argument, NULL, 'f'},
+       {"info",                no_argument, NULL, 'i'},
        {NULL,                  no_argument, NULL, 0x0}
 };
 
@@ -72,6 +73,7 @@ static void usage(char *progname)
        printf("Usage: %s [options]\n", progname);
        printf(" -f, --file\t\tOpen watchdog device file\n");
        printf("\t\t\tDefault is /dev/watchdog\n");
+       printf(" -i, --info\t\tShow watchdog_info\n");
        printf(" -b, --bootstatus\tGet last boot status (Watchdog/POR)\n");
        printf(" -d, --disable\t\tTurn off the watchdog timer\n");
        printf(" -e, --enable\t\tTurn on the watchdog timer\n");
@@ -97,6 +99,7 @@ int main(int argc, char *argv[])
        int c;
        int oneshot = 0;
        char *file = "/dev/watchdog";
+       struct watchdog_info info;
 
        setbuf(stdout, NULL);
 
@@ -118,6 +121,16 @@ int main(int argc, char *argv[])
                exit(-1);
        }
 
+       /*
+        * Validate that `file` is a watchdog device
+        */
+       ret = ioctl(fd, WDIOC_GETSUPPORT, &info);
+       if (ret) {
+               printf("WDIOC_GETSUPPORT error '%s'\n", strerror(errno));
+               close(fd);
+               exit(ret);
+       }
+
        optind = 0;
 
        while ((c = getopt_long(argc, argv, sopts, lopts, NULL)) != -1) {
@@ -205,6 +218,18 @@ int main(int argc, char *argv[])
                case 'f':
                        /* Handled above */
                        break;
+               case 'i':
+                       /*
+                        * watchdog_info was obtained as part of file open
+                        * validation. So we just show it here.
+                        */
+                       oneshot = 1;
+                       printf("watchdog_info:\n");
+                       printf(" identity:\t\t%s\n", info.identity);
+                       printf(" firmware_version:\t%u\n",
+                              info.firmware_version);
+                       printf(" options:\t\t%08x\n", info.options);
+                       break;
 
                default:
                        usage(argv[0]);
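
Since info is now populated unconditionally at open time, it can also gate optional features later in the tool; a hypothetical follow-up (not part of the patch) that checks a capability bit before relying on it:

	/* Only attempt to change the timeout if the driver supports it. */
	if (info.options & WDIOF_SETTIMEOUT)
		printf("driver supports WDIOC_SETTIMEOUT\n");
	else
		printf("driver has a fixed timeout\n");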
index 051d7d3f443b4f6663d38b5c190f9293ac51f359..927a151fa9aa9e52c5c721f20c34d1682575b17f 100644 (file)
@@ -69,7 +69,7 @@ int read_usb_vudc_device(struct udev_device *sdev, struct usbip_usb_device *dev)
        FILE *fd = NULL;
        struct udev_device *plat;
        const char *speed;
-       int ret = 0;
+       size_t ret;
 
        plat = udev_device_get_parent(sdev);
        path = udev_device_get_syspath(plat);
@@ -79,8 +79,10 @@ int read_usb_vudc_device(struct udev_device *sdev, struct usbip_usb_device *dev)
        if (!fd)
                return -1;
        ret = fread((char *) &descr, sizeof(descr), 1, fd);
-       if (ret < 0)
+       if (ret != 1) {
+               err("Cannot read vudc device descr file: %s", strerror(errno));
                goto err;
+       }
        fclose(fd);
 
        copy_descr_attr(dev, &descr, bDeviceClass);
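
fread() returns a size_t count of complete items read and is never negative, so the old "ret < 0" check could not detect a failure; comparing against the requested item count is the reliable pattern. A generic sketch of that pattern (error handling follows the style already used in this file):

	size_t nread = fread(&descr, sizeof(descr), 1, fd);

	if (nread != 1) {
		/* ferror()/feof() distinguish a read error from a short file */
		if (ferror(fd))
			err("read error: %s", strerror(errno));
		goto err;
	}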
diff --git a/tools/virtio/crypto/hash.h b/tools/virtio/crypto/hash.h
new file mode 100644 (file)
index 0000000..e69de29
index f91aeb5fe57162f1fba03d93d26903fe14200a8f..8f41cd6bd5c088d9c9d0828331a139cf2cc94316 100644 (file)
@@ -29,4 +29,6 @@ enum dma_data_direction {
 #define dma_unmap_single(...) do { } while (0)
 #define dma_unmap_page(...) do { } while (0)
 
+#define dma_max_mapping_size(...) SIZE_MAX
+
 #endif
diff --git a/tools/virtio/xen/xen.h b/tools/virtio/xen/xen.h
new file mode 100644 (file)
index 0000000..f569387
--- /dev/null
@@ -0,0 +1,6 @@
+#ifndef XEN_XEN_STUB_H
+#define XEN_XEN_STUB_H
+
+#define xen_domain() 0
+
+#endif
index c9449aaf438dc9c273610cbb83bcdccb1bb98c53..57b20f7b672953a288aa069f05cfec8d632d2770 100644 (file)
@@ -29,13 +29,11 @@ header-test- += linux/android/binderfs.h
 header-test-$(CONFIG_CPU_BIG_ENDIAN) += linux/byteorder/big_endian.h
 header-test-$(CONFIG_CPU_LITTLE_ENDIAN) += linux/byteorder/little_endian.h
 header-test- += linux/coda.h
-header-test- += linux/coda_psdev.h
 header-test- += linux/elfcore.h
 header-test- += linux/errqueue.h
 header-test- += linux/fsmap.h
 header-test- += linux/hdlc/ioctl.h
 header-test- += linux/ivtv.h
-header-test- += linux/jffs2.h
 header-test- += linux/kexec.h
 header-test- += linux/matroxfb.h
 header-test- += linux/netfilter_ipv4/ipt_LOG.h
@@ -55,20 +53,12 @@ header-test- += linux/v4l2-mediabus.h
 header-test- += linux/v4l2-subdev.h
 header-test- += linux/videodev2.h
 header-test- += linux/vm_sockets.h
-header-test- += scsi/scsi_bsg_fc.h
-header-test- += scsi/scsi_netlink.h
-header-test- += scsi/scsi_netlink_fc.h
 header-test- += sound/asequencer.h
 header-test- += sound/asoc.h
 header-test- += sound/asound.h
 header-test- += sound/compress_offload.h
 header-test- += sound/emu10k1.h
 header-test- += sound/sfnt_info.h
-header-test- += sound/sof/eq.h
-header-test- += sound/sof/fw.h
-header-test- += sound/sof/header.h
-header-test- += sound/sof/manifest.h
-header-test- += sound/sof/trace.h
 header-test- += xen/evtchn.h
 header-test- += xen/gntdev.h
 header-test- += xen/privcmd.h
index 362a01886bab9ef64ca89b61de1ff93ddf26e1bb..8731dfeced8b7ea4f2bb19e19d833330914be2d8 100644 (file)
@@ -8,6 +8,7 @@
 #include <linux/kvm.h>
 #include <linux/kvm_host.h>
 #include <linux/perf_event.h>
+#include <linux/perf/arm_pmu.h>
 #include <linux/uaccess.h>
 #include <asm/kvm_emulate.h>
 #include <kvm/arm_pmu.h>
@@ -146,8 +147,7 @@ u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx)
        if (kvm_pmu_pmc_is_chained(pmc) &&
            kvm_pmu_idx_is_high_counter(select_idx))
                counter = upper_32_bits(counter);
-
-       else if (!kvm_pmu_idx_is_64bit(vcpu, select_idx))
+       else if (select_idx != ARMV8_PMU_CYCLE_IDX)
                counter = lower_32_bits(counter);
 
        return counter;
@@ -193,7 +193,7 @@ static void kvm_pmu_release_perf_event(struct kvm_pmc *pmc)
  */
 static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc)
 {
-       u64 counter, reg;
+       u64 counter, reg, val;
 
        pmc = kvm_pmu_get_canonical_pmc(pmc);
        if (!pmc->perf_event)
@@ -201,16 +201,19 @@ static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc)
 
        counter = kvm_pmu_get_pair_counter_value(vcpu, pmc);
 
-       if (kvm_pmu_pmc_is_chained(pmc)) {
-               reg = PMEVCNTR0_EL0 + pmc->idx;
-               __vcpu_sys_reg(vcpu, reg) = lower_32_bits(counter);
-               __vcpu_sys_reg(vcpu, reg + 1) = upper_32_bits(counter);
+       if (pmc->idx == ARMV8_PMU_CYCLE_IDX) {
+               reg = PMCCNTR_EL0;
+               val = counter;
        } else {
-               reg = (pmc->idx == ARMV8_PMU_CYCLE_IDX)
-                      ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + pmc->idx;
-               __vcpu_sys_reg(vcpu, reg) = lower_32_bits(counter);
+               reg = PMEVCNTR0_EL0 + pmc->idx;
+               val = lower_32_bits(counter);
        }
 
+       __vcpu_sys_reg(vcpu, reg) = val;
+
+       if (kvm_pmu_pmc_is_chained(pmc))
+               __vcpu_sys_reg(vcpu, reg + 1) = upper_32_bits(counter);
+
        kvm_pmu_release_perf_event(pmc);
 }
 
@@ -440,8 +443,25 @@ static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
                                  struct pt_regs *regs)
 {
        struct kvm_pmc *pmc = perf_event->overflow_handler_context;
+       struct arm_pmu *cpu_pmu = to_arm_pmu(perf_event->pmu);
        struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
        int idx = pmc->idx;
+       u64 period;
+
+       cpu_pmu->pmu.stop(perf_event, PERF_EF_UPDATE);
+
+       /*
+        * Reset the sample period to the architectural limit,
+        * i.e. the point where the counter overflows.
+        */
+       period = -(local64_read(&perf_event->count));
+
+       if (!kvm_pmu_idx_is_64bit(vcpu, pmc->idx))
+               period &= GENMASK(31, 0);
+
+       local64_set(&perf_event->hw.period_left, 0);
+       perf_event->attr.sample_period = period;
+       perf_event->hw.sample_period = period;
 
        __vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(idx);
 
@@ -449,6 +469,8 @@ static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
                kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
                kvm_vcpu_kick(vcpu);
        }
+
+       cpu_pmu->pmu.start(perf_event, PERF_EF_RELOAD);
 }
 
 /**
@@ -567,12 +589,12 @@ static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx)
                 * high counter.
                 */
                attr.sample_period = (-counter) & GENMASK(63, 0);
+               if (kvm_pmu_counter_is_enabled(vcpu, pmc->idx + 1))
+                       attr.config1 |= PERF_ATTR_CFG1_KVM_PMU_CHAINED;
+
                event = perf_event_create_kernel_counter(&attr, -1, current,
                                                         kvm_pmu_perf_overflow,
                                                         pmc + 1);
-
-               if (kvm_pmu_counter_is_enabled(vcpu, pmc->idx + 1))
-                       attr.config1 |= PERF_ATTR_CFG1_KVM_PMU_CHAINED;
        } else {
                /* The initial sample period (overflow count) of an event. */
                if (kvm_pmu_idx_is_64bit(vcpu, pmc->idx))
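
The re-arm computation in the overflow handler deserves a worked example: the period is the two's-complement distance from the current count to the next wrap, truncated to 32 bits for counters that are not 64-bit.

	/*
	 * Illustration: a 32-bit event counter currently at 0xfffffff0.
	 *
	 *   period = -(0xfffffff0ULL)   = 0xffffffff00000010
	 *   period &= GENMASK(31, 0)    = 0x10
	 *
	 * so the perf event is re-armed to overflow after exactly 16 more
	 * counts, i.e. at the architectural wrap-around point.
	 */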
index 55fed77a9f739e1f07dba71b010ac29c72e92cc7..4fd4f6db181b0b3f911e964c9bc5c763ce7ef638 100644 (file)
@@ -30,7 +30,7 @@ TRACE_EVENT(vgic_update_irq_pending,
 #endif /* _TRACE_VGIC_H */
 
 #undef TRACE_INCLUDE_PATH
-#define TRACE_INCLUDE_PATH ../../../virt/kvm/arm/vgic
+#define TRACE_INCLUDE_PATH ../../virt/kvm/arm/vgic
 #undef TRACE_INCLUDE_FILE
 #define TRACE_INCLUDE_FILE trace
 
index e6de3159e682fca1d48520890d7c2452bc222583..d6f0696d98efe4738b470e4a92ca24ca33d54d61 100644 (file)
@@ -617,8 +617,9 @@ static int kvm_create_vm_debugfs(struct kvm *kvm, int fd)
 
                stat_data->kvm = kvm;
                stat_data->offset = p->offset;
+               stat_data->mode = p->mode ? p->mode : 0644;
                kvm->debugfs_stat_data[p - debugfs_entries] = stat_data;
-               debugfs_create_file(p->name, 0644, kvm->debugfs_dentry,
+               debugfs_create_file(p->name, stat_data->mode, kvm->debugfs_dentry,
                                    stat_data, stat_fops_per_vm[p->kind]);
        }
        return 0;
@@ -626,8 +627,9 @@ static int kvm_create_vm_debugfs(struct kvm *kvm, int fd)
 
 static struct kvm *kvm_create_vm(unsigned long type)
 {
-       int r, i;
        struct kvm *kvm = kvm_arch_alloc_vm();
+       int r = -ENOMEM;
+       int i;
 
        if (!kvm)
                return ERR_PTR(-ENOMEM);
@@ -639,44 +641,45 @@ static struct kvm *kvm_create_vm(unsigned long type)
        mutex_init(&kvm->lock);
        mutex_init(&kvm->irq_lock);
        mutex_init(&kvm->slots_lock);
-       refcount_set(&kvm->users_count, 1);
        INIT_LIST_HEAD(&kvm->devices);
 
-       r = kvm_arch_init_vm(kvm, type);
-       if (r)
-               goto out_err_no_disable;
-
-       r = hardware_enable_all();
-       if (r)
-               goto out_err_no_disable;
-
-#ifdef CONFIG_HAVE_KVM_IRQFD
-       INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list);
-#endif
-
        BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX);
 
-       r = -ENOMEM;
        for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
                struct kvm_memslots *slots = kvm_alloc_memslots();
+
                if (!slots)
-                       goto out_err_no_srcu;
+                       goto out_err_no_arch_destroy_vm;
                /* Generations must be different for each address space. */
                slots->generation = i;
                rcu_assign_pointer(kvm->memslots[i], slots);
        }
 
-       if (init_srcu_struct(&kvm->srcu))
-               goto out_err_no_srcu;
-       if (init_srcu_struct(&kvm->irq_srcu))
-               goto out_err_no_irq_srcu;
        for (i = 0; i < KVM_NR_BUSES; i++) {
                rcu_assign_pointer(kvm->buses[i],
                        kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL_ACCOUNT));
                if (!kvm->buses[i])
-                       goto out_err;
+                       goto out_err_no_arch_destroy_vm;
        }
 
+       refcount_set(&kvm->users_count, 1);
+       r = kvm_arch_init_vm(kvm, type);
+       if (r)
+               goto out_err_no_arch_destroy_vm;
+
+       r = hardware_enable_all();
+       if (r)
+               goto out_err_no_disable;
+
+#ifdef CONFIG_HAVE_KVM_IRQFD
+       INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list);
+#endif
+
+       if (init_srcu_struct(&kvm->srcu))
+               goto out_err_no_srcu;
+       if (init_srcu_struct(&kvm->irq_srcu))
+               goto out_err_no_irq_srcu;
+
        r = kvm_init_mmu_notifier(kvm);
        if (r)
                goto out_err;
@@ -696,7 +699,9 @@ out_err_no_irq_srcu:
 out_err_no_srcu:
        hardware_disable_all();
 out_err_no_disable:
-       refcount_set(&kvm->users_count, 0);
+       kvm_arch_destroy_vm(kvm);
+       WARN_ON_ONCE(!refcount_dec_and_test(&kvm->users_count));
+out_err_no_arch_destroy_vm:
        for (i = 0; i < KVM_NR_BUSES; i++)
                kfree(kvm_get_bus(kvm, i));
        for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
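
After this reordering the error unwinding mirrors the setup sequence in reverse; a compact summary of the intended pairing (illustrative, not code from the patch):

	/*
	 * Setup order:  memslots -> buses -> users_count + arch init
	 *               -> hardware enable -> srcu -> mmu notifier
	 * Unwind order: srcu cleanup <- hardware_disable_all()
	 *               <- kvm_arch_destroy_vm() + users_count drop
	 *               <- buses and memslots freed at out_err_no_arch_destroy_vm
	 */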
@@ -2359,20 +2364,23 @@ out:
        kvm_arch_vcpu_unblocking(vcpu);
        block_ns = ktime_to_ns(cur) - ktime_to_ns(start);
 
-       if (!vcpu_valid_wakeup(vcpu))
-               shrink_halt_poll_ns(vcpu);
-       else if (halt_poll_ns) {
-               if (block_ns <= vcpu->halt_poll_ns)
-                       ;
-               /* we had a long block, shrink polling */
-               else if (vcpu->halt_poll_ns && block_ns > halt_poll_ns)
+       if (!kvm_arch_no_poll(vcpu)) {
+               if (!vcpu_valid_wakeup(vcpu)) {
                        shrink_halt_poll_ns(vcpu);
-               /* we had a short halt and our poll time is too small */
-               else if (vcpu->halt_poll_ns < halt_poll_ns &&
-                       block_ns < halt_poll_ns)
-                       grow_halt_poll_ns(vcpu);
-       } else
-               vcpu->halt_poll_ns = 0;
+               } else if (halt_poll_ns) {
+                       if (block_ns <= vcpu->halt_poll_ns)
+                               ;
+                       /* we had a long block, shrink polling */
+                       else if (vcpu->halt_poll_ns && block_ns > halt_poll_ns)
+                               shrink_halt_poll_ns(vcpu);
+                       /* we had a short halt and our poll time is too small */
+                       else if (vcpu->halt_poll_ns < halt_poll_ns &&
+                               block_ns < halt_poll_ns)
+                               grow_halt_poll_ns(vcpu);
+               } else {
+                       vcpu->halt_poll_ns = 0;
+               }
+       }
 
        trace_kvm_vcpu_wakeup(block_ns, waited, vcpu_valid_wakeup(vcpu));
        kvm_arch_vcpu_block_finish(vcpu);
@@ -3929,7 +3937,9 @@ static int kvm_debugfs_open(struct inode *inode, struct file *file,
        if (!refcount_inc_not_zero(&stat_data->kvm->users_count))
                return -ENOENT;
 
-       if (simple_attr_open(inode, file, get, set, fmt)) {
+       if (simple_attr_open(inode, file, get,
+                            stat_data->mode & S_IWUGO ? set : NULL,
+                            fmt)) {
                kvm_put_kvm(stat_data->kvm);
                return -ENOMEM;
        }
@@ -4177,7 +4187,8 @@ static void kvm_init_debug(void)
 
        kvm_debugfs_num_entries = 0;
        for (p = debugfs_entries; p->name; ++p, kvm_debugfs_num_entries++) {
-               debugfs_create_file(p->name, 0644, kvm_debugfs_dir,
+               int mode = p->mode ? p->mode : 0644;
+               debugfs_create_file(p->name, mode, kvm_debugfs_dir,
                                    (void *)(long)p->offset,
                                    stat_fops[p->kind]);
        }